# File I/O class
# A wrapper around the NetCDF4 library, used by
# BOUT++ routines. This allows easily changing
# methods later.
#
# NOTE: NetCDF includes unlimited dimensions,
# but this library is just for very simple
# I/O operations. Educated guesses are made
# for the dimensions.
try:
import numpy as np
except ImportError:
print "ERROR: NumPy module not available"
raise
library = None # Record which library to use
try:
from netCDF4 import Dataset
library = "netCDF4"
except ImportError:
print "netcdf4-python module not found"
try:
from Scientific.IO.NetCDF import NetCDFFile as Dataset
from Scientific.N import Int, Float
library = "Scientific"
except ImportError:
print "ERROR: Scientific.IO.NetCDF module not found"
raise
import time
def getUserName():
try:
        import os, pwd
except ImportError:
return 'unknown user'
pwd_entry = pwd.getpwuid(os.getuid())
    name = pwd_entry[4].split(',')[0].strip()
if name == '':
name = pwd_entry[0]
return name
class DataFile:
handle = None
def open(self, filename, write=False, create=False,
format='NETCDF3_CLASSIC'):
if (not write) and (not create):
self.handle = Dataset(filename, "r")
elif create:
if library == "Scientific":
self.handle = Dataset(filename, "w",
'Created ' + time.ctime(time.time())
+ ' by ' + getUserName())
else:
self.handle = Dataset(filename, "w", format=format)
else:
self.handle = Dataset(filename, "a")
def close(self):
        if self.handle is not None:
self.handle.close()
self.handle = None
def __init__(self, filename=None):
        if filename is not None:
self.open(filename)
def __del__(self):
self.close()
def read(self, name):
"""Read a variable from the file."""
        if self.handle is None: return None
try:
var = self.handle.variables[name]
except KeyError:
# Not found. Try to find using case-insensitive search
var = None
for n in self.handle.variables.keys():
if n.lower() == name.lower():
print "WARNING: Reading '"+n+"' instead of '"+name+"'"
var = self.handle.variables[n]
        if var is None:
return None
ndims = len(var.dimensions)
if ndims == 0:
data = var.getValue()
return data[0]
else:
return var[:]
def list(self):
"""List all variables in the file."""
        if self.handle is None: return []
return self.handle.variables.keys()
def ndims(self, varname):
"""Number of dimensions for a variable."""
        if self.handle is None: return None
try:
var = self.handle.variables[varname]
except KeyError:
return None
return len(var.dimensions)
def size(self, varname):
"""List of dimension sizes for a variable."""
        if self.handle is None: return []
try:
var = self.handle.variables[varname]
except KeyError:
return []
        return [len(self.handle.dimensions[d]) for d in var.dimensions]
def write(self, name, data):
"""Writes a variable to file, making guesses for the dimensions"""
s = np.shape(data)
# Get the variable type
t = type(data).__name__
if t == 'NoneType':
print "DataFile: None passed as data to write. Ignoring"
return
if t == 'ndarray':
# Numpy type
t = data.dtype.str
try:
# See if the variable already exists
var = self.handle.variables[name]
# Check the shape of the variable
if var.shape != s:
print "Datafile: Variable already exists with different size: "+ name
raise
except KeyError:
# Not found, so add.
# Get dimensions
defdims = [(),
('x',),
('x','y'),
('x','y','z'),
('t','x','y','z')]
def find_dim(dim):
# Find a dimension with given name and size
size, name = dim
# See if it exists already
try:
d = self.handle.dimensions[name]
# Check if it's the correct size
if len(d) == size:
return name
# Find another with the correct size
                    for dn, d in self.handle.dimensions.items():
if len(d) == size:
return dn
# None found, so create a new one
i = 2
while True:
dn = name + str(i)
try:
d = self.handle.dimensions[dn]
# Already exists, so keep going
except KeyError:
# Not found. Create
print "Defining dimension "+ dn + " of size %d" % size
self.handle.createDimension(dn, size)
return dn
i = i + 1
except KeyError:
# Doesn't exist, so add
print "Defining dimension "+ name + " of size %d" % size
self.handle.createDimension(name, size)
return name
# List of (size, 'name') tuples
dlist = zip(s, defdims[len(s)])
# Get new list of variables, and turn into a tuple
dims = tuple( map(find_dim, dlist) )
# Create the variable
if library == "Scientific":
if t == 'int':
print "Integer"
tc = Int
else:
tc = Float
var = self.handle.createVariable(name, tc, dims)
else:
var = self.handle.createVariable(name, t, dims)
# Write the data
var[:] = data
#var = data
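# A minimal usage sketch (the file name "example.nc" and the variable name
# "pressure" are illustrative only, not part of BOUT++):
#
#   f = DataFile()
#   f.open("example.nc", create=True)
#   f.write("pressure", np.zeros((10, 20)))  # dimensions guessed as ('x', 'y')
#   f.close()
#
#   f = DataFile("example.nc")
#   print(f.list())             # ['pressure']
#   p = f.read("pressure")
#   f.close()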
# Source: bendudson/BOUT-1.0, tools/pylib/boututils/datafile.py (Python, GPL-3.0)
"""
Class description of a DNA chain built of base pairs
"""
from typing import List, Optional, Tuple, Union
from copy import deepcopy
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D # NOQA
from scipy.interpolate import interp1d
try:
from mayavi import mlab
maya_imported = True
except ImportError:
maya_imported = False
print("Could not import mayavi libraries, 3d plotting is disabled")
from fractaldna.dna_models import basepair
from fractaldna.utils import rotations as r
from fractaldna.utils.constants import BP_ROTATION, BP_SEPARATION
class PlottableSequence:
"""
This is an inheritable class that gives DNA chains plotting methods and
output methods.
"""
def to_text(self, seperator: str = " ") -> str:
"""
Return a description of the molecules in the chain as text
        :param seperator: column separator
"""
key = (
"#NAME SHAPE CHAIN_ID STRAND_ID BP_INDEX "
+ "SIZE_X SIZE_Y SIZE_Z POS_X "
+ "POS_Y POS_Z ROT_X ROT_Y ROT_Z\n"
)
output = [key.replace(" ", seperator)]
for pair in self.basepairs:
output.append(pair.to_text(seperator=seperator))
return "".join(output)
def to_frame(self) -> pd.DataFrame:
"""
Return the molecules as a pandas data frame
:return: Pandas data frame with molecule information
"""
return pd.concat(
[pair.to_frame() for pair in self.basepairs], ignore_index=False, sort=False
)
def to_plot(
self, plot_p: bool = True, plot_b: bool = True, plot_s: bool = True
) -> matplotlib.figure.Figure:
"""
Return a matplotlib.Figure instance with molecules plotted
:param plot_p: Show Phosphates in plot
:param plot_b: Show Bases in plot
:param plot_s: Show sugars in plot
:return: Matplotlib Figure
"""
sugars = []
triphosphates = []
bases = []
bps = ["guanine", "adenine", "thymine", "cytosine"]
for pair in self.basepairs:
for (name, molecule) in pair.iterMolecules():
if molecule.name.lower() == "sugar":
sugars.append(molecule.position)
elif molecule.name.lower() == "phosphate":
triphosphates.append(molecule.position)
elif molecule.name.lower() in bps:
bases.append(molecule.position)
# Plotting
empty = [[], [], []]
bases = [ii for ii in zip(*map(list, bases))] if plot_b else empty
triphosphates = (
[ii for ii in zip(*map(list, triphosphates))] if plot_p else empty
)
sugars = [ii for ii in zip(*map(list, sugars))] if plot_s else empty
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.scatter(bases[0], bases[1], bases[2], c="0.6", s=20)
ax.scatter(triphosphates[0], triphosphates[1], triphosphates[2], c="y", s=20)
ax.scatter(sugars[0], sugars[1], sugars[2], c="r", s=20)
return fig
def to_surface_plot(self) -> matplotlib.figure.Figure:
"""
Plot the surfaces of each molecule in the chain.
        Avoid this with large chains; it assumes each molecule is an ellipsoid
:return: Matplotlib figure (contour plot)
"""
def ellipse_xyz(center, extent, rotation=np.zeros([3])):
rmatrix = r.eulerMatrix(*rotation)
[a, b, c] = extent
u, v = np.mgrid[0 : 2 * np.pi : 10j, 0 : np.pi : 5j]
x = a * np.cos(u) * np.sin(v) + center[0]
y = b * np.sin(u) * np.sin(v) + center[1]
z = c * np.cos(v) + center[2]
for ii in range(0, len(x)):
for jj in range(0, len(x[ii])):
row = np.array([x[ii][jj], y[ii][jj], z[ii][jj]]) - center
xp, yp, zp = np.dot(rmatrix, row.transpose())
x[ii][jj] = xp + center[0]
y[ii][jj] = yp + center[1]
z[ii][jj] = zp + center[2]
return x, y, z
sugars = []
triphosphates = []
bases = []
bps = ["guanine", "adenine", "thymine", "cytosine"]
for pair in self.basepairs:
for (name, molecule) in pair.iterMolecules():
if molecule.name.lower() == "sugar":
sugars.append(
(molecule.position, molecule.dimensions, molecule.rotation)
)
elif molecule.name.lower() == "phosphate":
triphosphates.append(
(molecule.position, molecule.dimensions, molecule.rotation)
)
elif molecule.name.lower() in bps:
bases.append(
(molecule.position, molecule.dimensions, molecule.rotation)
)
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
for base in bases:
x, y, z = ellipse_xyz(base[0], base[1], rotation=base[2])
ax.plot_wireframe(x, y, z, color="0.6")
for phosphate in triphosphates:
x, y, z = ellipse_xyz(phosphate[0], phosphate[1], rotation=phosphate[2])
ax.plot_wireframe(x, y, z, color="y")
for sugar in sugars:
x, y, z = ellipse_xyz(sugar[0], sugar[1], rotation=sugar[2])
ax.plot_wireframe(x, y, z, color="r")
return fig
def to_line_plot(self, size: Tuple[int, int] = (400, 350)):
"""
Return a mayavi figure instance with histone and linkers shown
:param size: Figure size (width, height)
:return: mayavi figure
        :raises ImportError: MayaVi is not installed
"""
if not maya_imported:
raise ImportError("MayaVi could not be imported")
if maya_imported is True:
fig = mlab.figure(bgcolor=(1.0, 1.0, 1.0), size=size)
if hasattr(self, "histones"):
# ax = fig.add_subplot(111, projection='3d')
histones = []
for histone in self.histones:
pos = np.array([bp.position for bp in histone.basepairs])
mlab.plot3d(
pos[:, 0],
pos[:, 1],
pos[:, 2],
color=(1.0, 0.8, 0),
tube_radius=11.5,
)
histones.append(histone.position)
histones = np.array(histones)
mlab.points3d(
histones[:, 0],
histones[:, 1],
histones[:, 2],
color=(0, 0, 1.0),
opacity=0.4,
scale_factor=70,
)
for linker in self.linkers:
pos = np.array([bp.position for bp in linker.basepairs])
mlab.plot3d(
pos[:, 0],
pos[:, 1],
pos[:, 2],
color=(0, 0.8, 0),
tube_radius=11.5,
)
else:
chains = set([bp.chain for bp in self.basepairs])
for chain in chains:
pos = np.array(
[
bp.position
for bp in filter(lambda x: x.chain == chain, self.basepairs)
]
)
mlab.plot3d(
pos[:, 0],
pos[:, 1],
pos[:, 2],
color=(1.0, 0, 0),
tube_radius=11.5,
)
return fig
def to_strand_plot(self, plot_p=True, plot_b=True, plot_s=True, plot_bp=False):
"""
Return a mayavi figure instance with strands plotted
:param plot_p : plot phosphate strands
:param plot_s : plot sugar strands
:param plot_b : plot base strands
:param plot_bp : join base pairs together
:return: Mayavi Figure
:raises ImportError: MayaVi not imported
"""
if not maya_imported:
raise ImportError("MayaVi Not Imported")
if maya_imported is True:
fig = mlab.figure(bgcolor=(1.0, 1.0, 1.0))
chains = set([bp.chain for bp in self.basepairs])
for chain in chains:
basepairs = [
bp for bp in filter(lambda x: x.chain == chain, self.basepairs)
]
sugar_l = []
sugar_r = []
phosphate_l = []
phosphate_r = []
base_l = []
base_r = []
bps = ["guanine", "adenine", "thymine", "cytosine"]
for pair in basepairs:
for (name, molecule) in pair.iterMolecules():
if molecule.name.lower() == "sugar":
if molecule.strand == 0:
sugar_l.append(molecule.position)
elif molecule.strand == 1:
sugar_r.append(molecule.position)
elif molecule.name.lower() == "phosphate":
if molecule.strand == 0:
phosphate_l.append(molecule.position)
elif molecule.strand == 1:
phosphate_r.append(molecule.position)
elif molecule.name.lower() in bps:
if molecule.strand == 0:
base_l.append(molecule.position)
elif molecule.strand == 1:
base_r.append(molecule.position)
# Plotting
base_l = [ii for ii in zip(*map(list, base_l))]
base_r = [ii for ii in zip(*map(list, base_r))]
phosphate_l = [ii for ii in zip(*map(list, phosphate_l))]
phosphate_r = [ii for ii in zip(*map(list, phosphate_r))]
sugar_l = [ii for ii in zip(*map(list, sugar_l))]
sugar_r = [ii for ii in zip(*map(list, sugar_r))]
if plot_b:
mlab.plot3d(
base_l[0],
base_l[1],
base_l[2],
color=(0.6, 0.6, 0.6),
tube_radius=1,
)
mlab.plot3d(
base_r[0],
base_r[1],
base_r[2],
color=(0.6, 0.6, 0.6),
tube_radius=1,
)
if plot_s:
mlab.plot3d(
sugar_l[0],
sugar_l[1],
sugar_l[2],
color=(1.0, 0, 0),
tube_radius=1,
)
mlab.plot3d(
sugar_r[0],
sugar_r[1],
sugar_r[2],
color=(1.0, 0, 0),
tube_radius=1,
)
if plot_p:
mlab.plot3d(
phosphate_l[0],
phosphate_l[1],
phosphate_l[2],
color=(1, 1, 0),
tube_radius=1,
)
mlab.plot3d(
phosphate_r[0],
phosphate_r[1],
phosphate_r[2],
color=(1, 1, 0),
tube_radius=1,
)
if plot_bp:
# plot bars joining base pairs
for ii in range(0, len(base_l[0])):
xs = (
phosphate_l[0][ii],
sugar_l[0][ii],
base_l[0][ii],
base_r[0][ii],
sugar_r[0][ii],
phosphate_r[0][ii],
)
ys = (
phosphate_l[1][ii],
sugar_l[1][ii],
base_l[1][ii],
base_r[1][ii],
sugar_r[1][ii],
phosphate_r[1][ii],
)
zs = (
phosphate_l[2][ii],
sugar_l[2][ii],
base_l[2][ii],
base_r[2][ii],
sugar_r[2][ii],
phosphate_r[2][ii],
)
mlab.plot3d(xs, ys, zs, color=(1, 1, 1), tube_radius=0.5)
return fig
class SplineLinker(PlottableSequence):
"""
*Inherits from PlottableSequence*
Link two histones together via a cubic spline
linker = SplineLinker(bp1, bp2, bp3, bp4, curviness=1., zrot=None)
Create base pairs that link to sections of DNA as follows:
bp1 bp2 <==== LINKER =====> bp3 bp4
Two base pairs on either side of the linker are needed to build splines
low curviness = straighter
    high curviness = smoother
startkey and stopkey act as keyframes for rotations
:param zrot:
Describes the twist of bp3 relative to bp2
- None: Determine automatically (experimental)
- double: rotation in radians (mod 2*pi)
:param startkey:
Starting keyframe for rotations
- key = i will start/stop rotations after the i-th base pair
- key = -i will start/stop rotations after the i-th last base pair
:param stopkey:
Ending keyframe for rotations
- key = i will start/stop rotations after the i-th base pair
- key = -i will start/stop rotations after the i-th last base pair
:param method:
Method to handle rotational interpolation
- "quaternion": Full use of quaternions
- "corrected_quaternion": Full use of quaternions, with a correction
to check that the base pair is aligned.
This method is recommended
- "matrix": Experimental method that doesn't use
quaternions. Currently incorrect.
:param chain:
Chain index assigned to linker and basepairs created therein
"""
linker_rotation = BP_ROTATION # rad, default screw rotation of dna
linker_bp_spacing = BP_SEPARATION # angstrom, default spacing between bps
def __init__(
self,
bp1: basepair.BasePair,
bp2: basepair.BasePair,
bp3: basepair.BasePair,
bp4: basepair.BasePair,
curviness: float = 1.0,
        zrot: Optional[float] = None,
        startkey: Optional[int] = None,
        stopkey: Optional[int] = None,
method: str = "corrected_quaternion",
chain: int = 0,
):
"""
Constructor
"""
assert method in [
"quaternion",
"matrix",
"corrected_quaternion",
], "Invalid interpolation method"
self.basepairs = []
self.chain = chain
points = np.array([bp1.position, bp2.position, bp3.position, bp4.position])
# start_x = bp2.rmatrix[:, 0]
# end_x = bp3.rmatrix[:, 0]
# this line is incorrect. We need to transfer the two rmatrices into
# the same frame
# relative_angle = np.arccos(np.sum(start_x*end_x) /
# np.sum(start_x**2)**.5 /
# np.sum(end_x**2)**.5)
# d = np.sum((bp2.position - bp3.position)**2)**.5
if curviness <= 0:
curviness = 1e-200
diff = 3.4 / 180 / curviness
t = np.array([1 - diff, 1, 2, 2 + diff])
x_interp = interp1d(t, points[:, 0], kind="cubic")
y_interp = interp1d(t, points[:, 1], kind="cubic")
z_interp = interp1d(t, points[:, 2], kind="cubic")
self.x_interp = x_interp
self.y_interp = y_interp
self.z_interp = z_interp
# calculate length
tt = np.linspace(1, 2, 1000)
xx = x_interp(tt)
yy = y_interp(tt)
zz = z_interp(tt)
dx = xx[1:] - xx[: (len(xx) - 1)]
dy = yy[1:] - yy[: (len(yy) - 1)]
dz = zz[1:] - zz[: (len(zz) - 1)]
length = sum((dx**2 + dy**2 + dz**2) ** 0.5)
n = length // self.linker_bp_spacing
self.spacing = length / n
# print(self.spacing)
tt = np.linspace(1, 2, int(n))
xx = x_interp(tt[1 : len(tt)])
yy = y_interp(tt[1 : len(tt)])
zz = z_interp(tt[1 : len(tt)])
interpolator = lambda t: np.array(
[x_interp(t + 1), y_interp(t + 1), z_interp(t + 1)]
)
if startkey is None:
startkey = 0
elif startkey < 0:
startkey = len(tt) - abs(startkey) - 1
if stopkey is None:
stopkey = len(tt) - 1
elif stopkey < 0:
stopkey = len(tt) - abs(stopkey)
# total rotation that the BP undergoes, relative to initial
rotation_unmodified = (-n * self.linker_rotation) % (2 * np.pi)
if zrot is None:
zrot = 0
# desired rotation relative to initial
zrot = zrot % (2 * np.pi)
diff = zrot - rotation_unmodified
if diff < -np.pi:
diff += 2 * np.pi
elif diff > np.pi:
diff -= 2 * np.pi
rot_angle = self.linker_rotation + diff / n
# print("\n", n, diff*180/np.pi, rot_angle*180/np.pi)
# print("Desired rotation", zrot*180/np.pi)
# print("Default rotation", rotation_unmodified*180/np.pi)
# print("Final rotation", ((rot_angle*n) % (2*np.pi))*180/np.pi)
# Run one loop to generate a series of rotation matrices
for (ii, (_x, _y, _z)) in enumerate(zip(xx, yy, zz)):
if ii != len(xx) - 1:
pos = np.array([_x, _y, _z])
bp = basepair.BasePair(
np.random.choice(["G", "A", "T", "C"]),
chain=chain,
position=[0, 0, 0],
rotation=[0, 0, 0],
index=ii,
)
if method in ["quaternion", "corrected_quaternion"]:
start_quaternion = r.quaternion_from_matrix(bp2.rmatrix)
if ii < startkey:
ll = 0
elif ii >= stopkey:
ll = 1
else:
ll = (ii - startkey + 1.0) / float(stopkey - startkey)
if method == "corrected_quaternion":
# get interpolated rotation matrix
end_quaternion = r.quaternion_from_matrix(bp3.rmatrix)
quat = r.quaternion_slerp(
start_quaternion, end_quaternion, ll, shortestpath=True
)
rmat = r.quaternion_matrix(quat)
# correct z
z = interpolator((ii + 1) / len(xx)) - interpolator(
ii / len(xx)
)
z = -z / np.linalg.norm(z)
z_current = rmat[:, 2]
perp = np.cross(z_current, z)
angle = np.arccos(
np.dot(z, z_current)
/ (np.linalg.norm(z) * np.linalg.norm(z_current))
)
r2 = r.rot_ax_angle(perp, angle)
rmat = np.dot(r2, rmat)
# new_z = bp3.rmatrix[:, 2]
# old_x = bp2.rmatrix[:, 0]
# old_y = bp2.rmatrix[:, 1]
# a = 1
# b = -np.dot(new_z, old_y) / np.dot(new_z, old_x)
# perp_x = a*old_x + b*old_y
# perp_x /= np.linalg.norm(perp_x)
# perp_y = np.cross(new_z, perp_x)
# end_matrix =\
# np.array([perp_x, perp_y, new_z]).transpose()
# end_quaternion = r.quaternion_from_matrix(end_matrix)
else:
end_quaternion = r.quaternion_from_matrix(bp3.rmatrix)
quat = r.quaternion_slerp(
start_quaternion, end_quaternion, ll, shortestpath=True
)
rmat = r.quaternion_matrix(quat)
elif method == "matrix":
ll = ii / len(xx)
rmat = r.matrix_interpolate(
bp2.rmatrix, bp3.rmatrix, interpolator, ll, precision=0.01
)
bp.rotate(rmat)
spin = rot_angle * (ii + 1)
bp.rotate(r.rot_ax_angle(rmat[:, 2], spin))
bp.translate(pos)
self.basepairs.append(bp)
return None
def translate(self, translation):
"""Translate the histone spatially
:param translation: 3-vector for translation
"""
for bp in self.basepairs:
bp.translate(translation)
return None
def setChain(self, chainIdx):
"""Set the Chain Index of all base pairs in the histone
:param chainIdx: Index for Chain
"""
self.chain = chainIdx
for bp in self.basepairs:
bp.setNewChain(chainIdx)
return None
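# A minimal usage sketch (assumes bp1..bp4 are existing basepair.BasePair
# objects, two on each side of the gap to be bridged):
#
#   linker = SplineLinker(bp1, bp2, bp3, bp4, curviness=1.0,
#                         method="corrected_quaternion")
#   print(len(linker.basepairs), "base pairs generated along the spline")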
class Histone(PlottableSequence):
"""
*Inherits from PlottableSequence*
This class defines a histone.
:param position: 3-vector for histone position
:param rotation: 3-vector for histone rotation (euler angles)
:param genome: string defining the genome for the histone
:param chain: Chain index for histone and basepairs therein
:param histone_index: An index for the histone (by default, order in the solenoid)
"""
radius_histone = 25 # radius of histone, angstrom
pitch_dna = 23.9 # 23.9 # pitch of DNA helix, angstrom
radius_dna = 41.8 # radius of DNA wrapping, angstrom
histone_bps = 146 # number of bps in histone
histone_turns = 1.65 * 2 * np.pi # angular turn around histone, radians
height = 27 * 1.65
z_offset = -height / 2.0 # distance between first bp and xy-plane, angstrom
# separation of bps around histone, angstrom
hist_bp_separation = histone_turns * radius_dna / histone_bps
hist_bp_rotation = BP_ROTATION # screw rotation of bp, radians
z_per_bp = height / histone_bps
turn_per_bp = histone_turns / histone_bps
z_angle = np.arctan(1.0 / pitch_dna)
histone_start_bp_rot = 0 # radians, rotation of bp at start of histone
histone_end_bp_rot = histone_start_bp_rot + histone_bps * hist_bp_rotation
histone_total_twist = (histone_bps * hist_bp_separation) % (2 * np.pi)
def __init__(
self,
        position: Union[List, np.ndarray],
        rotation: Union[List, np.ndarray],
genome: str = None,
chain: int = 0,
histone_index: int = 0,
):
"""Create a Histone"""
assert len(position) == 3, "position is length 3 array"
        assert len(rotation) == 3, "rotation is length 3 array"
if genome is None:
genome = "".join(
[
np.random.choice(["G", "A", "T", "C"])
for ii in range(self.histone_bps)
]
)
assert len(genome) == self.histone_bps, "genome should be {} base pairs".format(
self.histone_bps
)
self.histone_index = histone_index
self.position = np.array(position)
self.rotation = np.array(rotation)
self.chain = chain
self.basepairs = []
theta = -0.5 * (self.histone_turns - 3 * np.pi)
z = self.z_offset
for ii, char in enumerate(genome):
bp = basepair.BasePair(
char,
chain=chain,
position=np.array([0, 0, 0]),
rotation=np.array([0, 0, 0]),
index=ii,
)
# make rotation matrix
rmatrix = r.rotx(np.pi / 2.0 + self.z_angle)
rmatrix = np.dot(r.rotz(theta), rmatrix)
bp.rotate(rmatrix)
bp.rotate(
np.dot(
rmatrix,
np.dot(
r.rotz(self.histone_start_bp_rot + ii * self.hist_bp_rotation),
np.linalg.inv(rmatrix),
),
)
)
# bp.rotate(np.array([np.pi/2., 0., 0]))
# bp.rotate(np.array([0, 0, theta]))
# bp.rotate(np.array([ii*self.turn_per_bp, 0, 0]))
x = self.radius_dna * np.cos(theta)
y = self.radius_dna * np.sin(theta)
position = np.array([x, y, z])
bp.translate(position)
theta += self.turn_per_bp
z += self.z_per_bp
self.basepairs.append(bp)
for bp in self.basepairs:
bp.rotate(self.rotation, about_origin=True)
bp.translate(self.position)
return None
def as_series(self) -> pd.Series:
"""Express the histone as a single molecule in a pandas series
:returns: Pandas Series for Histone
"""
return pd.Series(
{
"name": "Histone",
"shape": "sphere",
"chain_idx": self.chain,
# "strand_idx": -1,
"histone_idx": self.histone_index,
"size_x": self.radius_histone,
"size_y": self.radius_histone,
"size_z": self.radius_histone,
"pos_x": self.position[0],
"pos_y": self.position[1],
"pos_z": self.position[2],
"rot_x": self.rotation[0],
"rot_y": self.rotation[1],
"rot_z": self.rotation[2],
}
)
    def translate(self, translation: Union[List, np.ndarray]) -> None:
"""Translate the histone spatially
:param translation: 3-vector for translation
"""
for bp in self.basepairs:
bp.translate(translation)
self.position += translation
return None
def setChain(self, chainIdx: int) -> None:
"""Set the Chain Index of all base pairs in the histone
:param chainIdx: Index for Chain
"""
self.chain = chainIdx
for bp in self.basepairs:
bp.setNewChain(chainIdx)
return None
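# A minimal usage sketch: a single histone at the origin with no rotation and
# a random 146-bp genome (generated automatically when genome is None):
#
#   h = Histone(position=[0.0, 0.0, 0.0], rotation=[0.0, 0.0, 0.0])
#   print(h.as_series())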
class Solenoid(PlottableSequence):
"""
*Inherits from PlottableSequence*
Define Solenoidal DNA in a voxel (basically a box).
This method works by placing histones around the z-axis (≈6 histones
per rotation) and then joining them together using SplineLinkers
:param voxelheight: Height of 'voxel' in angstrom
:param radius: Radius from Solenoid centre to histone centre
:param nhistones: Number of histones to place
:param histone_angle: tilt of histones from axis in degrees
:param twist: whether the DNA exiting the final spine should be
rotated an extra pi/2.
:param chain: Chain index for solenoid and basepairs therein
"""
def __init__(
self,
voxelheight: float = 750,
radius: float = 100,
nhistones: int = 38,
histone_angle: float = 50,
twist: bool = False,
chain: int = 0,
):
self.radius = radius
self.voxelheight = voxelheight
self.nhistones = nhistones
self.chain = chain
self.tilt = histone_angle * np.pi / 180.0
self.zshift = (self.voxelheight - 4.0 * Histone.radius_histone) / self.nhistones
self.height = (self.nhistones - 1) * self.zshift # length of the fibre
prev_bp1 = basepair.BasePair(
np.random.choice(["G", "A", "T", "C"]),
chain=chain,
position=np.array([0, 0, -1 * BP_SEPARATION]),
index=-2,
)
prev_bp2 = basepair.BasePair(
np.random.choice(["G", "A", "T", "C"]),
chain=chain,
position=np.array([0, 0, -0 * BP_SEPARATION]),
index=-1,
)
rot = np.array([0, 0, np.pi / 2.0]) if twist is True else np.zeros(3)
next_bp3 = basepair.BasePair(
np.random.choice(["G", "A", "T", "C"]),
chain=chain,
position=np.array([0, 0, self.voxelheight + 0.0 * BP_SEPARATION]),
rotation=rot,
index=1000,
)
next_bp4 = basepair.BasePair(
np.random.choice(["G", "A", "T", "C"]),
chain=chain,
position=np.array([0, 0, self.voxelheight + 1.0 * BP_SEPARATION]),
rotation=rot,
index=1001,
)
self.basepairs = []
self.positions = [
np.array([0, -self.radius, 0.5 * (self.voxelheight - self.height)])
]
rm = r.eulerMatrix(np.pi / 2.0, -np.pi / 2.0, np.pi / 2.0)
rm = np.dot(r.roty(self.tilt), rm)
self.rotations = [r.getEulerAngles(rm)]
for ii in range(self.nhistones - 1):
last = self.positions[-1]
this = np.dot(r.rotz(np.pi / 3.0), last)
this[2] = last[2] + self.zshift
self.positions.append(this)
last = self.rotations[-1]
this = np.array([last[0], last[1], last[2] + np.pi / 3.0])
self.rotations.append(this)
self.histones = []
self.linkers = (
[]
) # the BPs in the linkers array are also in the basepairs array
for ii, (pos, rot) in enumerate(zip(self.positions, self.rotations)):
h = Histone(pos, rot, chain=chain, histone_index=ii)
self.histones.append(h)
if len(self.histones) > 1:
bp1 = self.histones[-2].basepairs[-2]
bp2 = self.histones[-2].basepairs[-1]
bp3 = self.histones[-1].basepairs[0]
bp4 = self.histones[-1].basepairs[1]
zr = -Histone.histone_total_twist - np.pi / 3 + Histone.hist_bp_rotation
l = SplineLinker(
bp1,
bp2,
bp3,
bp4,
curviness=1,
zrot=zr,
method="corrected_quaternion",
chain=chain,
)
self.linkers.append(l)
self.basepairs.extend(l.basepairs)
else:
bp3 = self.histones[-1].basepairs[0]
bp4 = self.histones[-1].basepairs[1]
l = SplineLinker(
prev_bp1,
prev_bp2,
bp3,
bp4,
curviness=1,
zrot=0,
method="corrected_quaternion",
chain=chain,
)
self.linkers.append(l)
self.basepairs.extend(l.basepairs)
self.basepairs.extend(h.basepairs)
# Add final linker
bp1 = self.histones[-1].basepairs[-2]
bp2 = self.histones[-1].basepairs[-1]
zr = -Histone.histone_total_twist - np.pi / 3 * (self.nhistones % 6)
if twist is True:
zr += np.pi / 2.0
l = SplineLinker(
bp1,
bp2,
next_bp3,
next_bp4,
curviness=1,
zrot=zr,
method="corrected_quaternion",
chain=chain,
)
self.linkers.append(l)
self.basepairs.extend(l.basepairs)
# reset bp indices
for ii, bp in enumerate(self.basepairs):
bp.set_bp_index(ii)
return None
    def translate(self, translation: Union[List, np.ndarray]) -> None:
"""Translate the solenoid spatially
:param translation: 3-vector for translation
"""
for histone in self.histones:
histone.translate(translation)
for linker in self.linkers:
linker.translate(translation)
return None
def setChain(self, chainIdx: int) -> None:
"""Set the Chain Index of all base pairs in the solenoid
:param chainIdx: Index for Chain
"""
self.chain = chainIdx
for histone in self.histones:
histone.setChain(chainIdx)
for linker in self.linkers:
linker.setChain(chainIdx)
for basepair in self.basepairs:
basepair.setNewChain(chainIdx)
return None
def histones_to_frame(self) -> pd.DataFrame:
"""Get Histones in Solenoid as a dataframe of their positions
:return: DataFrame of Histones
"""
return pd.DataFrame([histone.as_series() for histone in self.histones])
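# A minimal usage sketch: a default solenoid voxel exported to tabular form
# (to_frame is inherited from PlottableSequence):
#
#   sol = Solenoid(voxelheight=750, nhistones=38)
#   molecules = sol.to_frame()
#   histones = sol.histones_to_frame()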
class TurnedSolenoid(Solenoid):
"""
*Inherits from Solenoid*
Define Solenoidal DNA in a voxel (basically a box). This Solenoid
will turn 90 degrees through the box
This method works by placing histones around the z-axis (≈6 histones
per rotation) and then joining them together using SplineLinkers
:param voxelheight: Height of 'voxel' in angstrom
    :param radius: Radius from Solenoid centre to histone centre (angstrom)
:param nhistones: Number of histones to place
:param histone_angle: tilt of histones from axis in degrees
:param twist: whether the DNA exiting the final spine should be
rotated an extra pi/2.
:param chain: Chain index for solenoid and basepairs therein
"""
def __init__(
self,
voxelheight: float = 750,
radius: float = 100,
nhistones: int = 38,
histone_angle: float = 50,
twist: bool = False,
chain: int = 0,
):
"""
Constructor
"""
self.nhistones = int(nhistones / 2**0.5)
self.box_width = voxelheight / 2.0
self.radius = radius
self.chain = chain
self.strand_length = voxelheight / 2**0.5
self.zshift = (
self.strand_length - 4.0 * Histone.radius_histone
) / self.nhistones
self.height = (self.nhistones - 1) * self.zshift
self.tilt = histone_angle * np.pi / 180.0
prev_bp1 = basepair.BasePair(
np.random.choice(["G", "A", "T", "C"]),
chain=chain,
position=np.array([0, 0, -1 * BP_SEPARATION]),
index=-2,
)
prev_bp2 = basepair.BasePair(
np.random.choice(["G", "A", "T", "C"]),
chain=chain,
position=np.array([0, 0, -0 * BP_SEPARATION]),
index=-1,
)
rot = np.array([0, 0, np.pi / 2.0]) if twist is True else np.zeros(3)
next_bp3 = basepair.BasePair(
np.random.choice(["G", "A", "T", "C"]),
chain=chain,
position=np.array([self.box_width + 0 * BP_SEPARATION, 0, self.box_width]),
rotation=rot,
index=1000,
)
next_bp4 = basepair.BasePair(
np.random.choice(["G", "A", "T", "C"]),
chain=chain,
position=np.array([self.box_width + 1 * BP_SEPARATION, 0, self.box_width]),
rotation=rot,
index=1001,
)
self.basepairs = []
# print(self.height, self.zshift, self.nhistones)
pos1 = np.array([0, -self.radius, 0.5 * (self.strand_length - self.height)])
self.positions = [np.dot(r.rotz(0 * np.pi / 3.0), pos1)] # start at 2pi/3
rm = r.eulerMatrix(np.pi / 2.0, -np.pi / 2.0, np.pi / 2.0 + 0 * np.pi / 3.0)
rm = np.dot(r.roty(self.tilt), rm)
self.rotations = [r.getEulerAngles(rm)]
for ii in range(self.nhistones - 1):
last = self.positions[-1]
this = np.dot(r.rotz(np.pi / 3.0), last)
this[2] = last[2] + self.zshift
self.positions.append(this)
last = self.rotations[-1]
this = np.array([last[0], last[1], last[2] + np.pi / 3.0])
self.rotations.append(this)
# Rotate positions/rotations through pi/2.
for ii in range(0, len(self.positions)):
pos = self.positions[ii]
old_x = pos[0]
old_z = pos[2]
ang_histone = old_z / self.strand_length * np.pi / 2.0
ang_pos = old_z / self.strand_length * np.pi / 4.0
# need to do two rotations to eliminate shear
ref_x = old_z * np.sin(ang_pos)
ref_z = old_z * np.cos(ang_pos)
new_x1 = old_z * np.sin(ang_pos) + old_x * np.cos(ang_pos)
new_z1 = old_z * np.cos(ang_pos) - old_x * np.sin(ang_pos)
new_x2 = new_x1 - ref_x
new_z2 = new_z1 - ref_z
new_x = ref_x + new_z2 * np.sin(ang_pos) + new_x2 * np.cos(ang_pos)
new_z = ref_z + new_z2 * np.cos(ang_pos) - new_x2 * np.sin(ang_pos)
self.positions[ii] = [new_x, pos[1], new_z]
rot = self.rotations[ii]
rm = r.eulerMatrix(*rot)
rm = np.dot(r.roty(ang_histone), rm)
self.rotations[ii] = r.getEulerAngles(rm)
self.histones = []
self.linkers = []
for ii, (pos, rot) in enumerate(zip(self.positions, self.rotations)):
h = Histone(pos, rot, chain=chain, histone_index=ii)
self.histones.append(h)
if len(self.histones) > 1:
bp1 = self.histones[-2].basepairs[-2]
bp2 = self.histones[-2].basepairs[-1]
bp3 = self.histones[-1].basepairs[0]
bp4 = self.histones[-1].basepairs[1]
zr = -Histone.histone_total_twist - np.pi / 3 + Histone.hist_bp_rotation
l = SplineLinker(
bp1,
bp2,
bp3,
bp4,
curviness=1,
zrot=zr,
method="corrected_quaternion",
chain=chain,
)
self.linkers.append(l)
self.basepairs.extend(l.basepairs)
else:
bp3 = self.histones[-1].basepairs[0]
bp4 = self.histones[-1].basepairs[1]
l = SplineLinker(
prev_bp1,
prev_bp2,
bp3,
bp4,
curviness=1,
zrot=0,
method="corrected_quaternion",
chain=chain,
)
self.linkers.append(l)
self.basepairs.extend(l.basepairs)
self.basepairs.extend(h.basepairs)
# Add final linker
bp1 = self.histones[-1].basepairs[-2]
bp2 = self.histones[-1].basepairs[-1]
zr = -Histone.histone_total_twist - np.pi / 3 * (self.nhistones % 6)
if twist is True:
zr += np.pi / 2.0
l = SplineLinker(
bp1,
bp2,
next_bp3,
next_bp4,
curviness=1.0,
zrot=zr,
method="corrected_quaternion",
chain=chain,
)
self.linkers.append(l)
self.basepairs.extend(l.basepairs)
# reset bp indices
for ii, bp in enumerate(self.basepairs):
bp.set_bp_index(ii)
return None
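# A minimal usage sketch: a solenoid that turns 90 degrees through its voxel:
#
#   tsol = TurnedSolenoid(voxelheight=750, nhistones=38)
#   print(len(tsol.histones), "histones placed along the quarter turn")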
class MultiSolenoidVolume(PlottableSequence):
"""
Class to build placement volumes that contain multiple solenoidal DNA
strands.
    Constructor:
        MultiSolenoidVolume(voxelheight=1500., separation=400, twist=False,
                            turn=False, chains=list(range(9)))
    :param voxelheight: size of placement volume in angstrom
    :param separation: separation between DNA strands in angstrom
    :param twist: whether the DNA exiting each solenoid is rotated an extra pi/2
    :param turn: whether each solenoid turns 90 degrees through the volume
    :param chains: chain indices (a subset of 0-8) to generate
Try:
dna = MultiSolenoidVolume()
dna.to_line_plot()
dna.to_text()
"""
def __init__(
self,
voxelheight: float = 1500.0,
separation: float = 400,
twist: bool = False,
turn: bool = False,
chains: List = list(range(9)),
):
if not (len(chains) == len(set(chains))):
raise ValueError("The same chain cannot be generated twice")
if not set(chains).issubset(set(range(9))):
raise ValueError(f"Valid Chains are {set(range(9))} and must be ints")
self.voxelheight = voxelheight
self.radius = 100
self.nhistones = int(38 * voxelheight / 750.0)
self.histone_angle = 50
self.sep = separation
self.twist = twist
self.turn = turn
self.basepairs = []
self.histones = []
self.linkers = []
if turn is True:
big_height = 2 * (self.voxelheight / 2.0 + self.sep)
little_height = 2 * (self.voxelheight / 2.0 - self.sep)
lengths = [
self.voxelheight,
little_height,
self.voxelheight,
big_height,
self.voxelheight,
little_height,
big_height,
big_height,
little_height,
]
else:
lengths = [self.voxelheight] * 9
translations = [
np.array([0, 0, 0]),
np.array([self.sep, 0, 0]),
np.array([0, self.sep, 0]),
np.array([-self.sep, 0, 0]),
np.array([0, -self.sep, 0]),
np.array([self.sep, self.sep, 0]),
np.array([-self.sep, self.sep, 0]),
np.array([-self.sep, -self.sep, 0]),
np.array([self.sep, -self.sep, 0]),
]
solenoids = []
for ii in chains:
if self.turn is True:
nhistones = int(self.nhistones * lengths[ii] / self.voxelheight)
s = TurnedSolenoid(
voxelheight=lengths[ii],
radius=self.radius,
nhistones=nhistones,
histone_angle=self.histone_angle,
twist=self.twist,
chain=ii,
)
else:
s = Solenoid(
voxelheight=lengths[ii],
radius=self.radius,
nhistones=self.nhistones,
histone_angle=self.histone_angle,
twist=self.twist,
chain=ii,
)
# s.setChain(ii)
s.translate(translations[ii])
solenoids.append(s)
for s in solenoids:
self.basepairs.extend(s.basepairs)
self.linkers.extend(s.linkers)
self.histones.extend(s.histones)
return None
class DNAChain(PlottableSequence):
"""
*Inherits from PlottableSequence*
A single DNA Chain built from a genome specified.
:param genome: A string specifying the genome, e.g. 'AGTATC'
:param chain: The Chain index to label this strand
"""
def __init__(self, genome: str, chain: int = 0):
"""
Construct a DNA chain from a genome specified from a string
"""
self.basepairs_chain0 = self._makeFromGenome(genome, chain=chain)
self.basepairs = self.basepairs_chain0
self.center_in_z()
@staticmethod
def _makeFromGenome(genome: str, chain: int = 0):
"""
:param genome: String of the genome, e.g. "GATTACA"
:param chain: Integer to set as the chain index
:return: DNA Chain object
"""
dnachain = []
position = np.array([0, 0, 0], dtype=float)
rotation = np.array([0, 0, 0], dtype=float)
index = 0
for char in genome:
# print("Appending " + char)
dnachain.append(
basepair.BasePair(
char, chain=chain, position=position, rotation=rotation, index=index
)
)
position += np.array([0.0, 0.0, BP_SEPARATION])
rotation += np.array([0.0, 0.0, BP_ROTATION])
index += 1
return dnachain
@staticmethod
def _turnAndTwistChain(chain, twist=0.0):
zmax = 0
zmin = 0
for pair in chain:
# for (name, mol) in pair.iterMolecules():
if pair.position[2] < zmin:
zmin = pair.position[2]
elif pair.position[2] > zmax:
zmax = pair.position[2]
zrange = zmax - zmin
radius = 2.0 * zrange / np.pi
# print(radius)
for pair in chain:
# Translation of the frame - new center position
theta = np.pi / 2.0 * (pair.position[2] - zmin) / zrange
new_origin = np.array(
[radius * (1 - np.cos(theta)), 0.0, radius * np.sin(theta) - radius]
)
# rotation of the frame
# oldframe = np.array([mol.position[0], mol.position[1], 0])
yang = np.pi / 2.0 * (pair.position[2] - zmin) / zrange
pair.rotate(np.array([0, yang, 0]), about_origin=True)
xang = twist * (pair.position[2] - zmin) / zrange
chain_z_axis = pair.rmatrix[:, 2]
rmatrix = r.rot_ax_angle(chain_z_axis, xang)
pair.rotate(rmatrix, about_origin=True)
pair.translate(new_origin - pair.position)
return chain
def center_in_z(self):
"""
Center the molecule around the z=0 plane
"""
minz = 0
maxz = 0
for bp in self.basepairs:
for (name, mol) in bp.iterMolecules():
if mol.position[2] < minz:
minz = mol.position[2]
elif mol.position[2] > maxz:
maxz = mol.position[2]
ztrans = (minz - maxz) / 2.0 - minz
translation = np.array([0.0, 0.0, ztrans])
for bp in self.basepairs:
bp.translate(translation)
return None
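# A minimal usage sketch: a straight chain built from an explicit genome
# string (to_plot is inherited from PlottableSequence):
#
#   chain = DNAChain("GATTACA")
#   fig = chain.to_plot()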
class TurnedDNAChain(DNAChain):
"""
*Inherits from DNAChain*
TurnedDNAChain(genome)
    Construct a single DNA chain that turns 90 degrees
:param genome: string of GATC specifying genome order
"""
def __init__(self, genome):
"""
TurnedDNAChain(genome)
Construct a DNA chain from a genome of GATC that turns 90 degrees
"""
super().__init__(genome)
self.turnDNA()
def turnDNA(self):
self.basepairs = DNAChain._turnAndTwistChain(self.basepairs)
return None
class TurnedTwistedDNAChain(DNAChain):
"""
*Inherits from DNAChain*
TurnedTwistedDNAChain(genome)
    Construct a single turned, twisted DNA chain
:param genome: string of GATC specifying genome order
"""
def __init__(self, genome):
"""
        TurnedTwistedDNAChain(genome)
        Construct a DNA chain from a genome of GATC that turns 90 degrees and twists
"""
super().__init__(genome)
self.turnAndTwistDNA()
def turnAndTwistDNA(self):
self.basepairs = DNAChain._turnAndTwistChain(self.basepairs, twist=np.pi / 2.0)
return None
class DoubleDNAChain(DNAChain):
"""
*Inherits from DNAChain*
DoubleDNAChain(genome, separation)
    Construct two parallel straight DNA chains
    Chain indices are assigned starting from the +y strand.
:param genome: string of GATC specifying genome order
:param separation: separation of each strand from the center in angstroms
"""
def __init__(self, genome, separation):
"""
DoubleDNAChain(genome, separation)
Construct two parallel straight DNA chains
"""
super().__init__(genome)
self._duplicateDNA(separation)
def _duplicateDNA(self, separation):
translation = np.array([0.0, separation / 2.0, 0.0], dtype=float)
self.basepairs_chain1 = deepcopy(self.basepairs_chain0)
for bp in self.basepairs_chain0:
bp.translate(translation)
bp.setNewChain(1)
for bp in self.basepairs_chain1:
bp.translate(-1 * translation)
bp.setNewChain(2)
self.basepairs = self.basepairs_chain0 + self.basepairs_chain1
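# A minimal usage sketch: two parallel chains 100 angstrom apart (the two
# strands are assigned chain indices 1 and 2 by _duplicateDNA):
#
#   pair = DoubleDNAChain("GATTACA" * 4, separation=100)
#   print(sorted(set(bp.chain for bp in pair.basepairs)))  # [1, 2]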
class TurnedDoubleDNAChain(TurnedDNAChain, DoubleDNAChain):
    def __init__(self, genome, separation):
        self.basepairs_chain0 = self._makeFromGenome(genome)
        self._duplicateDNA(separation=separation)
        self.turnDNA()
class TurnedTwistedDoubleDNAChain(TurnedTwistedDNAChain, DoubleDNAChain):
    def __init__(self, genome, separation):
        self.basepairs_chain0 = self._makeFromGenome(genome)
        self._duplicateDNA(separation=separation)
        self.turnAndTwistDNA()
class FourStrandDNAChain(DNAChain):
"""
*Inherits from DNAChain*
FourStrandDNAChain(genome, separation)
Construct four straight DNA chains
Chain indices are assigned anticlockwise starting from the +y strand.
:param genome: string of GATC specifying genome order
:param separation: separation of each strand from the center in angstroms
"""
def __init__(self, genome: str, separation: float):
"""
constructor
"""
super().__init__(genome)
self.makeFourStrands(separation)
def makeFourStrands(self, separation):
translation_y = np.array([0.0, separation / 2.0, 0.0], dtype=float)
translation_x = np.array([separation / 2.0, 0.0, 0.0], dtype=float)
self.basepairs_chain1 = deepcopy(self.basepairs_chain0)
self.basepairs_chain2 = deepcopy(self.basepairs_chain0)
self.basepairs_chain3 = deepcopy(self.basepairs_chain0)
for bp in self.basepairs_chain0:
bp.translate(translation_y)
bp.setNewChain(0)
for bp in self.basepairs_chain1:
bp.translate(-1 * translation_x)
bp.setNewChain(1)
for bp in self.basepairs_chain2:
bp.translate(-1 * translation_y)
bp.setNewChain(2)
for bp in self.basepairs_chain3:
bp.translate(1 * translation_x)
bp.setNewChain(3)
self.basepairs = (
self.basepairs_chain0
+ self.basepairs_chain1
+ self.basepairs_chain2
+ self.basepairs_chain3
)
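# A minimal usage sketch: four straight strands around the centre:
#
#   four = FourStrandDNAChain("GATC" * 8, separation=100)
#   print(sorted(set(bp.chain for bp in four.basepairs)))  # [0, 1, 2, 3]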
class FourStrandTurnedDNAChain(DNAChain):
"""
*Inherits from DNAChain*
FourStrandTurnedDNAChain(genome, separation)
Construct four DNA chains that turn 90 degrees.
Chain indices are assigned anticlockwise starting from the +y strand.
:param genome: string of GATC specifying genome order
:param separation: separation of each strand from the center in angstroms
:param twist: boolean, add a 90 deg twist to each chain
"""
def __init__(self, genome: str, separation: float, twist: bool = False):
"""
Constructor
"""
DNAChain.__init__(self, genome)
translation_y = np.array([0.0, separation / 2.0, 0.0], dtype=float)
translation_x = np.array(
[separation / 2.0, 0.0, -separation / 2.0], dtype=float
)
ang = np.pi / 2.0 if twist is True else 0
radiusC0 = len(self.basepairs_chain0) * BP_SEPARATION * 2 / np.pi
radiusC3 = radiusC0 - separation / 2.0
radiusC1 = radiusC0 + separation / 2.0
self.basepairs_chain2 = DNAChain(genome, chain=2).basepairs
lengthC3 = int(np.floor(radiusC3 / radiusC0 * len(genome)))
lengthC1 = int(np.floor(radiusC1 / radiusC0 * len(genome)))
longGenome = genome * int(np.ceil(radiusC1 / radiusC0))
genome_chain3 = genome[:lengthC3]
self.basepairs_chain3 = DNAChain(genome_chain3, chain=3).basepairs
genome_chain1 = longGenome[:lengthC1]
self.basepairs_chain1 = DNAChain(genome_chain1, chain=1).basepairs
chains = [
self.basepairs_chain0,
self.basepairs_chain1,
self.basepairs_chain2,
self.basepairs_chain3,
]
transforms = [+translation_y, -translation_x, -translation_y, +translation_x]
angles = [
ang + (2 * np.pi - BP_ROTATION * len(c) % (2 * np.pi)) for c in chains
]
for (ii, (c, t, a)) in enumerate(zip(chains, transforms, angles)):
c = self._turnAndTwistChain(c, twist=a)
for bp in c:
bp.translate(t)
chains[ii] = c
self.basepairs = []
for c in chains:
self.basepairs.extend(c)
return None
class EightStrandDNAChain(DNAChain):
"""
Construct eight DNA chains that can turn 90 degrees if turn=True
Chain indices are assigned anticlockwise starting from the +y strand,
    first to the inner four strands, then to the outer four strands.
i.e.::
____________________
| |
| 4 |
| 0 |
| 5 1 3 7 |
| 2 |
| 6 |
|__________________|
Strands 1 and 3, 0 and 2 are separated by sep1
Strands 4 and 6, 5 and 7 are separated by sep2
:param genome: string of GATC specifying genome order
:param sep1: separation of inner strands from the center in angstroms
:param sep2: separation of outer strands from the center in angstroms
:param turn: boolean, turn strands 90 degrees along box
:param twist: boolean, add a 90 deg twist to each chain
"""
def __init__(
self,
genome: str,
sep1: float,
sep2: float,
turn: bool = False,
twist: bool = False,
):
"""
EightStrandTurnedDNAChain(genome, sep1, sep2, turn=False, twist=False)
"""
DNAChain.__init__(self, genome)
v1 = -sep1 / 2.0 if turn is True else 0
v2 = -sep2 / 2.0 if turn is True else 0
trans_y1 = np.array([0.0, sep1 / 2.0, 0.0], dtype=float)
trans_x1 = np.array([sep1 / 2.0, 0.0, v1], dtype=float)
trans_y2 = np.array([0.0, sep2 / 2.0, 0.0], dtype=float)
trans_x2 = np.array([sep2 / 2.0, 0.0, v2], dtype=float)
ang = np.pi / 2.0 if twist is True else 0
# centrally aligned strands
self.basepairs_chain2 = DNAChain(genome, chain=2).basepairs
self.basepairs_chain4 = DNAChain(genome, chain=4).basepairs
self.basepairs_chain6 = DNAChain(genome, chain=6).basepairs
radiusC0 = len(self.basepairs_chain0) * BP_SEPARATION * 2 / np.pi
if turn is True:
radiusC1 = radiusC0 + sep1 / 2.0
radiusC3 = radiusC0 - sep1 / 2.0
radiusC5 = radiusC0 + sep2 / 2.0
radiusC7 = radiusC0 - sep2 / 2.0
else:
radiusC1 = radiusC0
radiusC3 = radiusC0
radiusC5 = radiusC0
radiusC7 = radiusC0
lengthC1 = int(np.floor(radiusC1 / radiusC0 * len(genome)))
lengthC3 = int(np.floor(radiusC3 / radiusC0 * len(genome)))
lengthC5 = int(np.floor(radiusC5 / radiusC0 * len(genome)))
lengthC7 = int(np.floor(radiusC7 / radiusC0 * len(genome)))
longGenome = genome * int(np.ceil(radiusC5 / radiusC0))
self.basepairs_chain1 = DNAChain(longGenome[:lengthC1], chain=1).basepairs
self.basepairs_chain3 = DNAChain(longGenome[:lengthC3], chain=3).basepairs
self.basepairs_chain5 = DNAChain(longGenome[:lengthC5], chain=5).basepairs
self.basepairs_chain7 = DNAChain(longGenome[:lengthC7], chain=7).basepairs
chains = [
self.basepairs_chain0,
self.basepairs_chain1,
self.basepairs_chain2,
self.basepairs_chain3,
self.basepairs_chain4,
self.basepairs_chain5,
self.basepairs_chain6,
self.basepairs_chain7,
]
transforms = [
+trans_y1,
-trans_x1,
-trans_y1,
+trans_x1,
+trans_y2,
-trans_x2,
-trans_y2,
+trans_x2,
]
angles = [
ang + (2 * np.pi - BP_ROTATION * len(c) % (2 * np.pi)) for c in chains
]
# print(angles)
for (ii, (c, t, a)) in enumerate(zip(chains, transforms, angles)):
if turn is True:
c = self._turnAndTwistChain(c, twist=a)
for bp in c:
bp.translate(t)
chains[ii] = c
self.basepairs = []
for c in chains:
self.basepairs.extend(c)
return None
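# A minimal usage sketch: eight strands with inner and outer separations,
# turned 90 degrees through the volume:
#
#   eight = EightStrandDNAChain("GATC" * 10, sep1=100, sep2=250, turn=True)
#   print(len(eight.basepairs))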
# Source: natl/fractaldna, fractaldna/dna_models/dnachain.py (Python, MIT)
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from rest_framework_jwt.views import obtain_jwt_token
from nomadgram import views
urlpatterns = [
url(settings.ADMIN_URL, admin.site.urls),
# User management
# url(r'^api-token-auth/', obtain_jwt_token),
url(r'rest-auth/', include('rest_auth.urls')),
url(r'rest-auth/registration/', include('rest_auth.registration.urls')),
url(r'users/', include('nomadgram.users.urls', namespace='users')),
url(r'images/', include('nomadgram.images.urls', namespace='images')),
url(r'notifications/', include('nomadgram.notifications.urls', namespace='notifications')),
url(r'accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += [
url(r'^', views.ReactAppView.as_view()),
]
if settings.DEBUG:
    # This allows the error pages to be debugged during development: just visit
    # these URLs in a browser to see how the error pages look.
urlpatterns += [
        url('400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url('403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url('404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url('500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
url('__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
# Source: plusbeauxjours/nomadgram, config/urls.py (Python, MIT)
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2013 Brian Douglass bhdouglass@gmail.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from datetime import datetime
import time
import threading
import sys
import feedparser
import requests
import logging
import subprocess
import os
logger = logging.getLogger('remindor_common')
from remindor_common import database as db
from remindor_common.datetimeutil import internal_date_format, internal_time_format
news_format = internal_date_format + " " + internal_time_format + ":%S"
class BlogReader(threading.Thread):
def __init__(self, rssfeed, database_file):
threading.Thread.__init__(self)
self.rssfeed = rssfeed
self.database_file = database_file
def run(self):
logger.debug("checking for news")
try:
rss = feedparser.parse(self.rssfeed)
if rss.status == 200:
u = rss.entries[0].updated_parsed
updated = datetime(u.tm_year, u.tm_mon, u.tm_mday, u.tm_hour, u.tm_min, u.tm_sec)
updated_s = updated.strftime(news_format)
database = db.Database(self.database_file)
news = database.get_internal("news_flash")
update_btn = False
                if news is None:
database.set_internal("news_flash", updated_s)
update_btn = True
else:
news = datetime.strptime(news, news_format)
if updated > news:
database.set_internal("news_flash", updated_s)
update_btn = True
logger.debug("update: " + str(update_btn))
if update_btn:
database.set_internal("new_news", "1")
else:
database.set_internal("new_news", "0")
database.close()
else:
logger.debug("non-normal http status: " + str(rss.status))
        except Exception:
            logger.warning("Exception caught checking for news: " + str(sys.exc_info()[0]))
logger.debug("done checking for news")
class PostRequest(threading.Thread):
def __init__(self, url, data):
threading.Thread.__init__(self)
self.data = data
self.url = url
def run(self):
status = -1
logger.debug("sending post request to: " + self.url)
try:
r = requests.post(self.url, params=self.data)
logger.debug("Post request content: " + r.content)
status = r.status_code
except:
logger.warning("Exception caught trying post request: " + str(sys.exc_info()[0]))
logger.debug("Post request status: " + str(status))
class RunCommand(threading.Thread):
def __init__(self, command, config_dir):
threading.Thread.__init__(self)
self.command = command
self.outfile = os.path.join(config_dir, 'command_output.txt')
def run(self):
logger.debug('start command: %s' % (self.command))
start = time.time()
output = ''
try:
            output = subprocess.check_output(self.command, stderr=subprocess.STDOUT,
                                             shell=True, universal_newlines=True)
logger.debug('command exited with a status of 0')
except subprocess.CalledProcessError as e:
logger.error('command exited with a status of %d' % (e.returncode))
output = e.output
try:
f = open(self.outfile, 'w')
f.write(output)
f.close()
except IOError:
logger.error('could not write output to the file: %s' % (self.outfile))
logger.debug('command output: %s' % (output))
logger.debug('end command (%ss)' % (time.time() - start))
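# A minimal usage sketch (the feed URL, database path, and command below are
# illustrative only):
#
#   BlogReader("http://example.com/news.rss", "/tmp/remindor.db").start()
#   PostRequest("http://example.com/api", {"key": "value"}).start()
#   RunCommand("echo hello", "/tmp/remindor").start()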
# Source: bhdouglass/remindor-common, remindor_common/threads.py (Python, GPL-3.0)
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2019-, Dilawar Singh"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
import moose
if moose._moose.__generated_by__ != "pybind11":
print("These bindings are not generated by pybind11.")
quit(0)
import random
import numpy as np
def test_children():
a1 = moose.Neutral('/a')
a2 = moose.Neutral('/a/b')
a3 = moose.Neutral('/a/b/c1')
moose.Neutral('/a/b/c2')
assert len(a1.children) == 1
assert len(a2.children) == 2
moose.le(a1)
moose.le(a2)
moose.le(a3)
moose.setCwe(a3)
s = moose.getCwe()
assert s == a3, (s, a3)
a11 = a1.children[0]
ax = moose.element(a1)
ax1 = ax.children[0]
assert ax == a1
assert ax1 == a11
assert a11[0].isA['Neutral'], a11.isA
assert ax1[0].isA['Neutral'], a11.isA
print("test_children is done")
def test_other():
a1 = moose.Pool('/ada')
assert a1.className == 'Pool', a1.className
finfo = moose.getFieldDict(a1.className)
s = moose.Streamer('/asdada')
p = moose.PulseGen('pg1')
assert p.delay[0] == 0.0
p.delay[1] = 0.99
assert p.delay[1] == 0.99, p.delay[1]
c = moose.Stoich('/dadaa')
v1 = moose.getFieldNames(c)
v2 = c.getFieldNames()
assert v1 == v2
assert len(v1) > 10, v1
def test_vec():
a = moose.Pool('/p111', 100)
v = moose.vec(a)
# le can cause segfault in some cases.
moose.le(v)
assert len(v) == 100, len(v)
assert v == v.vec
assert v[0] == v.vec[0], (v[0], v.vec[0])
x = [random.random() for i in range(100)]
v.conc = x
assert np.isclose(np.sum(v.conc), sum(x))
assert np.allclose(v.conc, x), (v.conc, x)
# assign bool to double.
y = [float(x < 5) for x in range(100) ]
v.concInit = y
assert (v.concInit[:5] == 1.0).all(), v.concInit[:5]
assert (v.concInit[5:] == 0.0).all(), v.concInit[5:]
def test_finfos():
s = moose.SimpleSynHandler('synh')
s.numSynapses = 10
assert s.numSynapses == 10
syns = s.synapse.vec
print(s.synapse, '111')
s8a = s.synapse[8]
s8b = s.synapse[-2]
assert s8a == s8b, (s8a, s8b)
# negative indexing.
assert syns[-2] == syns[len(syns)-2]
assert len(syns) == 10
for i, s in enumerate(syns):
s.weight = 9.0
for s in syns:
assert s.weight == 9.0
# this is a shorthand for above for loop.
syns.weight = 11.121
assert np.allclose(syns.weight, 11.121), syns.weight
# try:
# print(syns[11])
# except Exception as e:
# print(e, "Great. We must got an exception here")
# else:
# print(syns[11])
# raise Exception("This should have failed")
a = moose.Pool('x13213')
a.concInit = 0.1
assert 0.1 == moose.getField(a, 'concInit')
# Now get some finfos.
a = moose.element('/classes/Compartment')
def test_inheritance():
ta = moose.Table2('/tab2', 10)
tb = moose.wildcardFind('/##[TYPE=Table2]')
assert len(tb) == len(ta.vec)
for i, (t1, t2) in enumerate(zip(tb, ta.vec)):
assert t1 == t2, (t1, t2)
assert t1.id == t2.id
assert t1.dataIndex == t2.dataIndex
assert t1.path == t2.path
a = moose.CubeMesh('/dadada')
isinstance(a, moose.CubeMesh)
assert isinstance(a, moose.CubeMesh)
aa = moose.wildcardFind('/##[TYPE=CubeMesh]')[0]
assert a == aa
# This must be true for isinstance to work.
assert isinstance(aa, moose.CubeMesh), (a.__class__, aa.__class__)
a = moose.CubeMesh('yapf')
assert a.isA('CubeMesh') == a.isA['CubeMesh']
assert a.isA['CubeMesh']
assert a.isA['ChemCompt']
def test_delete():
a = moose.Neutral('/xxx')
b = moose.Neutral('/xxx/1')
c = moose.Neutral('/xxx/1/2')
d = moose.Neutral('/xxx/2')
e = moose.Neutral('/xxx/2/2')
f = moose.Neutral('/xxx/2/2/3')
x = moose.wildcardFind('/xxx/##')
assert len(x) == 5
moose.delete(e)
x = moose.wildcardFind('/xxx/##')
assert len(x) == 3
moose.delete(a)
x = moose.wildcardFind('/xxx/##')
assert len(x) == 0
def test_wrapper():
a = moose.Pool('/dadadada', concInit=9.99, nInit=10)
assert a.nInit == 10
f = moose.Function('/fun1', expr='x0+x1+A+B')
assert f.expr == 'x0+x1+A+B'
assert f.numVars == 4, f.numVars
def test_access():
a1 = moose.Pool('ac1')
try:
a2 = moose.Compartment('ac1')
except Exception:
pass
else:
raise RuntimeError("Should have failed.")
a2 = moose.element(a1)
a3 = moose.element(a1.path)
assert a2 == a3, (a2, a3)
def test_element():
a = moose.Pool('xxxx', 2)
ae = moose.element(a)
assert ae.parent == a.parent, (ae.parent, a.parent)
def test_typing():
a = moose.Pool('x123y', 100)
a.concInit = True
assert a.concInit == 1.0, a.concInit
a.concInit = False
assert a.concInit == 0.0, a.concInit
av = moose.vec(a)
av.concInit = 1.0
assert np.allclose(av.concInit, 1.0), av.concInit
av.concInit = 0.012
assert np.allclose(av.concInit, 0.012), av.concInit
av.concInit = True
assert np.allclose(av.concInit, 1.0), av.concInit
def test_elements():
a = moose.HHChannel('hhchannel')
x = a.gateX
y = a.gateY
z = a.gateZ
xe, ye, ze = (moose.element(a) for a in (x, y, z))
assert xe.isA['HHGate']
assert ye.isA['HHGate']
assert ze.isA['HHGate']
def test_paths():
x = moose.Neutral('///x')
assert x.path == '/x', x.path
def test_le():
# see issue BhallaLab/moose-core#423
x = moose.le('/')
assert len(x) > 5, x
try:
moose.le('/abrakadabra')
except ValueError:
pass
else:
raise RuntimeError("This should have raised ValueError")
def main():
test_paths()
test_children()
test_finfos()
test_other()
test_delete()
test_wrapper()
test_inheritance()
test_access()
test_element()
test_vec()
test_typing()
test_elements()
test_le()
if __name__ == '__main__':
main()
# Source: dilawar/moose-core, tests/core/test_api.py (Python, GPL-3.0)
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import unicode_literals
import six
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal, assert_raises, TestCase
from MDAnalysis import units
from MDAnalysis.core import flags
class TestDefaultUnits(TestCase):
@staticmethod
def test_length():
assert_equal(flags['length_unit'], 'Angstrom',
u"The default length unit should be Angstrom (in core.flags)")
@staticmethod
def test_time():
assert_equal(flags['time_unit'], 'ps',
u"The default length unit should be pico seconds (in core.flags)")
@staticmethod
def test_convert_gromacs_trajectories():
assert_equal(flags['convert_lengths'], True,
u"The default behaviour should be to auto-convert Gromacs trajectories")
class TestUnitEncoding(TestCase):
@staticmethod
def test_unicode():
try:
assert_equal(units.lengthUnit_factor[u"\u212b"], 1.0)
except KeyError:
raise AssertionError("Unicode symbol for Angtrom not supported")
@staticmethod
def test_unicode_encoding_with_symbol():
try:
assert_equal(units.lengthUnit_factor[u"Å"], 1.0)
except KeyError:
raise AssertionError("UTF-8-encoded symbol for Angtrom not supported")
class TestConstants(object):
# CODATA 2010 (NIST): http://physics.nist.gov/cuu/Constants/
# (accessed 2015-02-15)
# Add a reference value to this dict for every entry in
# units.constants
constants_reference = {
'N_Avogadro': 6.02214129e+23, # mol**-1
'elementary_charge': 1.602176565e-19, # As
'calorie': 4.184, # J
}
def test_constant(self):
for name, value in six.iteritems(self.constants_reference):
yield self.check_physical_constant, name, value
@staticmethod
def check_physical_constant(name, reference):
assert_almost_equal(units.constants[name], reference)
class TestConversion(object):
@staticmethod
def _assert_almost_equal_convert(value, u1, u2, ref):
assert_almost_equal(units.convert(value, u1, u2), ref,
err_msg="Conversion {0} --> {1} failed".format(u1, u2))
# generate individual test cases using nose's test generator mechanism
def test_length(self):
nm = 12.34567
A = nm * 10.
yield self._assert_almost_equal_convert, nm, 'nm', 'A', A
yield self._assert_almost_equal_convert, A, 'Angstrom', 'nm', nm
def test_time(self):
yield self._assert_almost_equal_convert, 1, 'ps', 'AKMA', 20.45482949774598
yield self._assert_almost_equal_convert, 1, 'AKMA', 'ps', 0.04888821
def test_energy(self):
yield self._assert_almost_equal_convert, 1, 'kcal/mol', 'kJ/mol', 4.184
yield self._assert_almost_equal_convert, 1, 'kcal/mol', 'eV', 0.0433641
def test_force(self):
yield self._assert_almost_equal_convert, 1, 'kJ/(mol*A)', 'J/m', 1.66053892103219e-11
yield self._assert_almost_equal_convert, 2.5, 'kJ/(mol*nm)', 'kJ/(mol*A)', 0.25
yield self._assert_almost_equal_convert, 1, 'kcal/(mol*Angstrom)', 'kJ/(mol*Angstrom)', 4.184
@staticmethod
def test_unit_unknown():
nm = 12.34567
assert_raises(ValueError, units.convert, nm, 'Stone', 'nm')
assert_raises(ValueError, units.convert, nm, 'nm', 'Stone')
@staticmethod
def test_unit_unconvertable():
nm = 12.34567
A = nm * 10.
assert_raises(ValueError, units.convert, A, 'A', 'ps')
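# Illustrative direct use of the conversion API exercised above (a sketch,
# not part of the test suite): units.convert(1.0, 'nm', 'Angstrom') returns
# 10.0, while unknown units such as 'Stone' raise ValueError.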
|
alejob/mdanalysis
|
testsuite/MDAnalysisTests/test_units.py
|
Python
|
gpl-2.0
| 4,653
|
[
"Gromacs",
"MDAnalysis"
] |
bad0d64c5cf4e1be2cc422f1f6cad2050b0b32b1395c2e83490ce5f2e41d9358
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import os
import sys
import struct
import textwrap
if sys.version_info[0] == 2:
from StringIO import StringIO
elif sys.version_info[0] > 2:
from io import StringIO
from .color import *
_debug = False
_verbose = False
indent = " "
def is_verbose():
return _verbose
def is_debug():
return _debug
def set_verbose(flag):
global _verbose
_verbose = flag
def set_debug(flag):
    global _debug
    _debug = flag
def msg(message, *args):
cprint("@*b{==>} %s" % cescape(message))
for arg in args:
print(indent + str(arg))
def info(message, *args, **kwargs):
fmt = kwargs.get('format', '*b')
stream = kwargs.get('stream', sys.stdout)
wrap = kwargs.get('wrap', False)
cprint("@%s{==>} %s" % (fmt, cescape(str(message))), stream=stream)
for arg in args:
if wrap:
lines = textwrap.wrap(
str(arg), initial_indent=indent, subsequent_indent=indent
)
for line in lines:
stream.write(line + '\n')
else:
stream.write(indent + str(arg) + '\n')
def verbose(message, *args, **kwargs):
if _verbose:
kwargs.setdefault('format', 'c')
info(message, *args, **kwargs)
def debug(message, *args, **kwargs):
if _debug:
kwargs.setdefault('format', 'g')
kwargs.setdefault('stream', sys.stderr)
info(message, *args, **kwargs)
def error(message, *args, **kwargs):
kwargs.setdefault('format', '*r')
kwargs.setdefault('stream', sys.stderr)
info("Error: " + str(message), *args, **kwargs)
def warn(message, *args, **kwargs):
kwargs.setdefault('format', '*Y')
kwargs.setdefault('stream', sys.stderr)
info("Warning: " + str(message), *args, **kwargs)
def die(message, *args, **kwargs):
error(message, *args, **kwargs)
sys.exit(1)
def hline(label=None, **kwargs):
"""Draw a labeled horizontal line.
Options:
char Char to draw the line with. Default '-'
max_width Maximum width of the line. Default is 64 chars.
"""
char = kwargs.pop('char', '-')
max_width = kwargs.pop('max_width', 64)
if kwargs:
        raise TypeError("'%s' is an invalid keyword argument for this function."
                        % next(iter(kwargs)))
rows, cols = terminal_size()
if not cols:
cols = max_width
else:
cols -= 2
cols = min(max_width, cols)
label = str(label)
prefix = char * 2 + " "
suffix = " " + (cols - len(prefix) - clen(label)) * char
out = StringIO()
out.write(prefix)
out.write(label)
out.write(suffix)
print(out.getvalue())
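# Illustrative usage (a sketch; actual output depends on the color support
# detected by color.cprint):
#   hline('results', char='=')   # prints something like '== results ===...'
#   warn('deprecated option')    # prints 'Warning: deprecated option' to stderr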
def terminal_size():
"""Gets the dimensions of the console: (rows, cols)."""
def ioctl_GWINSZ(fd):
try:
import fcntl # Not available on Windows
import termios # Not available on Windows
rc = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
        except Exception:
            return
return rc
rc = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not rc:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
rc = ioctl_GWINSZ(fd)
os.close(fd)
        except Exception:
            pass
if not rc:
rc = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
return int(rc[0]), int(rc[1])
|
ashutoshvt/psi4
|
psi4/driver/util/tty/__init__.py
|
Python
|
lgpl-3.0
| 4,312
|
[
"Psi4"
] |
3673cc5cb4d85125444e03e6309a50844484ba7d2decdd55ba7b4284368b6240
|
# coding=UTF-8
"""
Created on Mar 11, 2014
@author: rgeorgi
"""
import re
import unittest
import string
# ===============================================================================
# Sub-tasks of cleaning
# ===============================================================================
from intent.alignment.Alignment import Alignment
from xigt.model import Tier, Item, Igt, XigtCorpus
from xigt.ref import selection_re, delimiters, span_re
from xigt.errors import XigtStructureError
from xigt.codecs.xigtxml import encode_tier, encode_item, encode_igt, encode_xigtcorpus
punc_re = '[.?!,\xc2]'
list_re = '(?:[0-9]+|[a-z]|i+)'
quote_re = '[\'"\`]'
def grammaticality(ret_str):
# Now, remove leading grammaticality markers
return re.sub('([#\*\?]+)', replace_group_with_whitespace, ret_str)
def surrounding_quotes_and_parens(ret_str):
ret_str = re.sub('^\s*([\'"`\[\(]+)', replace_group_with_whitespace, ret_str)
ret_str = re.sub('([\'"`\]\)\.]+)\s*$', replace_group_with_whitespace, ret_str)
return ret_str
def split_punctuation(ret_str):
return re.sub(r'(\w+)([.?!,])+', r'\1 \2', ret_str).strip()
def remove_external_punctuation(ret_str):
ret_str = re.sub(r'(\w+)({})+\s'.format(punc_re), r'\1 ', ret_str)
ret_str = re.sub(r'(?:^|\s)({}+)(\w+)'.format(punc_re), r'\2', ret_str)
return re.sub(r'(\w+)([{}])+$'.format(punc_re), r'\1 ', ret_str)
def join_morphs(ret_str):
"""
Find tokens that have letters or numbers on two sides separated by a
period or morph and join them.
E.g. MASC . 1SG becomes "MASC.1SG"
"""
m = re.sub('([\w\d])\s*([\.\-])\s*(?=[\w\d])', r'\1\2', ret_str)
return m
def fix_grams(ret_str):
"""
Search for gram strings that have been split with whitespace and rejoin them.
For instance "3 SG" will become "3SG"
"""
for gram in ['3SG', '1PL', '2SG', '2PL']:
for i in range(1, len(gram) + 1):
first, last = gram[:i], gram[i:]
if first and last:
expr = '%s\s+%s' % (first, last)
ret_str = re.sub(expr, gram, ret_str, flags=re.I)
return ret_str
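# Illustrative behaviour: fix_grams('3 SG said') -> '3SG said', since '3 SG'
# matches a split form of the known gram '3SG' (case-insensitive).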
def remove_elipses(ret_str):
return re.sub('\.\s*\.\s*\.', '', ret_str)
def remove_solo_punctuation(ret_str):
ret_str = re.sub('\s*({}+)\s*'.format(punc_re), replace_group_with_whitespace, ret_str)
return ret_str
def remove_final_punctuation(ret_str):
ret_str = re.sub('({}+)$'.format(punc_re), replace_group_with_whitespace, ret_str)
return ret_str
def rejoin_letter(ret_str, letter='t', direction='right'):
"""
Reattach lone letters hanging out by their lonesome.
@param ret_str:
"""
if direction == 'right':
ret_str = re.sub(r'\s(%s)\s+(\S+)' % letter, r' \1\2', ret_str)
elif direction == 'left':
ret_str = re.sub(r'(\S+)\s+(%s)\s' % letter, r'\1\2 ', ret_str)
else:
raise Exception('Invalid direction specified!')
return ret_str
def remove_byte_char(ret_str):
return re.sub('^b["\']\s+', '', ret_str).strip()
def replace_group_with_whitespace(match_obj):
"""
:type match_obj: MatchObject
"""
match_start, match_stop = match_obj.span(1)
overall_start, overall_stop = match_obj.span(0)
start_offset = match_start - overall_start
stop_offset = (match_stop-match_start) + start_offset
new_str = '{}{}{}'.format(match_obj.group(0)[:start_offset],
' '*(stop_offset-start_offset),
match_obj.group(0)[stop_offset:])
return new_str
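# Illustrative behaviour: the matched group is blanked in place, so the string
# keeps its original length and later character offsets still line up, e.g.
#   re.sub('(#)', replace_group_with_whitespace, 'a#b') -> 'a b'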
def remove_parenthetical_numbering(ret_str):
ret_str = re.sub('(\((?:[ivx]+|[a-z]|[1-9\.]+[a-z]?)\))', replace_group_with_whitespace, ret_str)
# ret_str = re.sub('^\s*(\(.*?\))', replace_group_with_whitespace, ret_str)
return ret_str
def remove_period_numbering(ret_str):
"""
Remove period-initial numbering like:
|
1. a. ii.
"""
number_search = '^\s*((?:[a-z]|[ivx]+)\.)'.format(list_re)
return re.sub(number_search, replace_group_with_whitespace, ret_str)
def remove_leading_numbers(ret_str):
return re.sub('^\s*([0-9]+)', replace_group_with_whitespace, ret_str)
def remove_numbering(ret_str):
ret_str = remove_parenthetical_numbering(ret_str)
ret_str = remove_period_numbering(ret_str)
ret_str = remove_leading_numbers(ret_str)
return ret_str
def remove_hyphens(ret_str):
return re.sub('[\-=]', '', ret_str)
def remove_leading_punctuation(ret_str):
    return re.sub('^[%s]+' % re.escape(string.punctuation), '', ret_str)
def collapse_spaces(ret_str):
return re.sub('\s+', ' ', ret_str)
# ===============================================================================
# Encode
# ===============================================================================
def rgp(o):
print(rgencode(o))
def rgencode(o):
if isinstance(o, Tier):
return encode_tier(o)
elif isinstance(o, Item):
return encode_item(o)
elif isinstance(o, Igt):
return encode_igt(o)
elif isinstance(o, XigtCorpus):
return ''.join(encode_xigtcorpus(o))
else:
raise Exception('%s is not a XIGT object, but is: %s' % (o, type(o)))
def concat_lines(linelist):
    """Concatenate a list of lines into a single string."""
    return ''.join(linelist)
def merge_lines(linelist):
"""
Given two lines, merge characters that fall into blank space on
the other line.
@param linelist:
"""
newline = ''
blank_spans = []
for line in linelist:
# If this is the first line, just make it the newline
if not newline:
newline = line[:]
# Find all the blanks in the newline
blanks = re.finditer('\s+', newline)
for blank in blanks:
blank_spans.append(blank.span())
# If there is already a newline, look at the non-blank
# parts of this line and insert them.
else:
nonblanks = re.finditer('\S+', line)
for nonblank in nonblanks:
nonblank_start, nonblank_stop = nonblank.span()
nonblank_txt = nonblank.group(0)
#===============================================================
# If the nonblank occurs after the end of the original line..
#===============================================================
if nonblank_start >= len(newline):
oldline = newline[:]
newline = ''
for i in range(len(line)):
if i < nonblank_start and i < len(oldline):
newline += oldline[i]
elif nonblank_start > i >= len(oldline):
newline += ' '
else:
newline += line[i]
continue
#===============================================================
# Otherwise, look to see if it can fit inside a blank space.
#===============================================================
fits = False
for blank_start, blank_stop in blank_spans:
if nonblank_start >= blank_start and nonblank_stop <= blank_stop:
fits = True
break
if fits:
# Actually merge the strings
oldline = newline[:] # Copy the old string
newline = ''
for i in range(len(oldline)):
if nonblank_start <= i < nonblank_stop:
newline += nonblank_txt[i - nonblank_start]
else:
newline += oldline[i]
# Find all the blanks in the newline
blank_spans = []
blanks = re.finditer('\s+', newline)
for blank in blanks:
blank_spans.append(blank.span())
return newline
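# For a worked example of merge_lines, see TestMergeLines at the bottom of
# this file.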
#===============================================================================
# Different tiers of cleaning
#===============================================================================
def clean_gloss_string(ret_str):
# Remove ellipses
# ret_str = remove_elipses(ret_str)
ret_str = join_morphs(ret_str)
ret_str = fix_grams(ret_str)
# Rejoin letters
ret_str = rejoin_letter(ret_str, 't', 'right')
ret_str = rejoin_letter(ret_str, 'h', 'left')
# Remove word-final punctuation
# ret_str = remove_external_punctuation(ret_str)
# Collapse spaces
# ret_str = collapse_spaces(ret_str)
# Remove final punctuation
ret_str = remove_final_punctuation(ret_str)
# Remove illegal chars
ret_str = re.sub('(#)', replace_group_with_whitespace, ret_str)
return ret_str
def clean_trans_string(ret_str):
# Start by removing the leading "B" stuff
# ret_str = re.sub('^b["\']', '', trans_string).strip()
# Remove word-final punctuation:
# ret_str = remove_external_punctuation(ret_str)
# Remove solo punctuation
ret_str = remove_solo_punctuation(ret_str)
# Remove surrounding quotes and parentheticals
ret_str = surrounding_quotes_and_parens(ret_str)
# Remove leading grammaticality markers
ret_str = grammaticality(ret_str)
# Remove surrounding quotes and parentheticals
# ret_str = surrounding_quotes_and_parens(ret_str)
# t seems to hang out on its own
ret_str = rejoin_letter(ret_str, letter='t', direction='right')
ret_str = rejoin_letter(ret_str, letter='h', direction='left')
ret_str = rejoin_letter(ret_str, letter='e', direction='left')
# Remove leading numbering
ret_str = remove_numbering(ret_str)
# Collapse spaces
# ret_str = collapse_spaces(ret_str)
return ret_str
def strip_leading_whitespace(lines):
"""
Given
:type lines: list[str]
"""
newlines = []
min_leading_whitespace = None
for line in lines:
leading_whitespace = re.search('^\s*', line).group(0)
if min_leading_whitespace is None:
min_leading_whitespace = len(leading_whitespace)
else:
min_leading_whitespace = min(min_leading_whitespace, len(leading_whitespace))
for line in lines:
newlines.append(line[min_leading_whitespace:])
return newlines
def clean_lang_string(ret_str):
"""
Clean the language string.
:param ret_str:
:return:
"""
# Remove leading byte string
# ret_str = remove_byte_char(ret_str)
# First remove leading parenthetical numbering
ret_str = remove_numbering(ret_str)
ret_str = surrounding_quotes_and_parens(ret_str)
# Remove spurious brackets
ret_str = re.sub('([\[\]\(\)])', replace_group_with_whitespace, ret_str)
# Split punctuation
# ret_str = remove_external_punctuation(ret_str)
# ret_str = split_punctuation(ret_str)
# Collapse spaces
# ret_str = collapse_spaces(ret_str)
# Remove final punctuation
# ret_str = remove_final_punctuation(ret_str)
# ret_str = remove_hyphens(ret_str)
return ret_str
def strict_columnar_alignment(s_a, s_b):
words_a = list(re.finditer('\S+', s_a))
words_b = list(re.finditer('\S+', s_b))
a = Alignment()
for i, word_a in enumerate(words_a):
start_a, stop_a = word_a.span()
for j, word_b in enumerate(words_b):
start_b, stop_b = word_b.span()
# CASE 1:
# word_a is completely subsumed
# by the span of word_b
if start_a >= start_b and stop_a <= stop_b:
a.add((i+1, j+1))
# CASE 2:
# word_b is completely subsumed
# by the span of word_a
elif start_b >= start_a and stop_b <= stop_a:
a.add((i+1, j+1))
return a
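# Illustrative behaviour: words are paired (1-based indices) when one word's
# span is contained within the other's; e.g. for 'the dog' vs. 'le  chien',
# word 1 aligns with word 1 and word 2 with word 2.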
def is_strict_columnar_alignment(s_a, s_b):
a = strict_columnar_alignment(s_a, s_b)
return len(a.all_src()) == len(s_a.split()) and len(a.all_tgt()) == len(s_b.split())
def resolve_objects(container, expression):
"""
Return the string that is the resolution of the alignment expression
`expression`, which selects ids from `container`.
"""
itemgetter = getattr(container, 'get_item', container.get)
tokens = []
expression = expression.strip()
for sel_delim, _id, _range in selection_re.findall(expression):
item = container.find(id=_id)
if item is None:
raise XigtStructureError(
'Referred Item (id: {}) from reference "{}" does not '
'exist in the given container.'
.format(_id, expression)
)
if _range:
for spn_delim, start, end in span_re.findall(_range):
start = int(start) if start else None
end = int(end) if end else None
tokens.append((item, (start, end)))
else:
tokens.append((item, None))
return tokens
# -------------------------------------------
# Search for judgment on line
# -------------------------------------------
def get_judgment(line):
line, j = extract_judgment(line)
return j
def extract_judgment(line):
"""
Given a string, attempt to extract the judgment character ("*" or "?") from it.
:param line:
:type line: str
:return: Tuple of the altered line and the judgment character.
:rtype: tuple[str, str]
"""
judgment_re = '^[\s\'\`\"]*([\?\*])'
result = re.search(judgment_re, line)
j = None
if result:
line = re.sub(judgment_re, replace_group_with_whitespace, line)
j = result.group(1)
if '*' in line:
if j is None:
j = '*'
else:
            j += '*'
return line, j
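# Illustrative behaviour: extract_judgment('*gato') returns (' gato', '*');
# the marker is blanked in place to preserve character offsets.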
#===============================================================================
# Backoff methods
#===============================================================================
def hyphenate_infinitive(ret_str):
return re.sub('to\s+(\S+)', r'to-\1', ret_str, flags=re.I)
#===============================================================================
# Test Cases
#===============================================================================
class TestLangLines(unittest.TestCase):
def runTest(self):
l1 = ' (38) Este taxista (*me) parece [t estar cansado]'
l1c = ' Este taxista *me parece t estar cansado '
self.assertEqual(clean_lang_string(l1), l1c)
def keep_something_test(self):
l1 = ' (1) Mangi-a.'
# l1 = ' (1) Mangi-a.'
l1_clean = clean_lang_string(l1)
l1_target = ' Mangi-a '
        self.assertEqual(l1_clean, l1_target)
class TestGlossLines(unittest.TestCase):
def test_gloss(self):
g1 = 'Agnès 1SG . REC 3SG . M . THM present. FUT .3 SG'
g1_clean = clean_gloss_string(g1)
g1_target = 'Agnès 1SG.REC 3SG.M.THM present.FUT.3SG'
        self.assertEqual(g1_clean, g1_target)
class TestHyphenate(unittest.TestCase):
def runTest(self):
h1 = 'the guests wanted to visit the other pavilion'
h1f = 'the guests wanted to-visit the other pavilion'
self.assertEqual(hyphenate_infinitive(h1), h1f)
class TestMergeLines(unittest.TestCase):
def runTest(self):
l1 = 'This an example merged lines'
l2 = ' is sdfa of '
merged = merge_lines([l1, l2])
tgt = 'This is an example of merged lines'
self.assertEqual(merged, tgt)
|
rgeorgi/intent
|
intent/igt/igtutils.py
|
Python
|
mit
| 15,737
|
[
"VisIt"
] |
7b47cebbad512ae623a8c6bc313794b2d936aabc3b1a6f68d9e5416595dbda75
|
# Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Base class for Residue, Chain, Model and Structure classes.
It is a simple container class, with list and dictionary like properties.
"""
from copy import copy
from Bio.PDB.PDBExceptions import PDBConstructionException, PDBException
class Entity:
"""
Basic container object. Structure, Model, Chain and Residue
are subclasses of Entity. It deals with storage and lookup.
"""
def __init__(self, id):
self.id=id
self.full_id=None
self.parent=None
self.child_list=[]
self.child_dict={}
        # Dictionary that keeps additional properties
self.xtra={}
# Special methods
def __len__(self):
"Return the number of children."
return len(self.child_list)
def __getitem__(self, id):
"Return the child with given id."
return self.child_dict[id]
def __delitem__(self, id):
"Remove a child."
return self.detach_child(id)
def __iter__(self):
"Iterate over children."
for child in self.child_list:
yield child
# Public methods
def get_level(self):
"""Return level in hierarchy.
A - atom
R - residue
C - chain
M - model
S - structure
"""
return self.level
def set_parent(self, entity):
"Set the parent Entity object."
self.parent=entity
def detach_parent(self):
"Detach the parent."
self.parent=None
def detach_child(self, id):
"Remove a child."
child=self.child_dict[id]
child.detach_parent()
del self.child_dict[id]
self.child_list.remove(child)
def add(self, entity):
"Add a child to the Entity."
entity_id=entity.get_id()
if self.has_id(entity_id):
raise PDBConstructionException( \
"%s defined twice" % str(entity_id))
entity.set_parent(self)
self.child_list.append(entity)
self.child_dict[entity_id]=entity
def get_iterator(self):
"Return iterator over children."
for child in self.child_list:
yield child
def get_list(self):
"Return a copy of the list of children."
return copy(self.child_list)
def has_id(self, id):
"""True if a child with given id exists."""
return (id in self.child_dict)
def get_parent(self):
"Return the parent Entity object."
return self.parent
def get_id(self):
"Return the id."
return self.id
def get_full_id(self):
"""Return the full id.
The full id is a tuple containing all id's starting from
the top object (Structure) down to the current object. A full id for
a Residue object e.g. is something like:
("1abc", 0, "A", (" ", 10, "A"))
This corresponds to:
Structure with id "1abc"
Model with id 0
Chain with id "A"
Residue with id (" ", 10, "A")
The Residue id indicates that the residue is not a hetero-residue
        (or a water) because it has a blank hetero field, that its sequence
        identifier is 10 and that its insertion code is "A".
"""
        if self.full_id is None:
entity_id=self.get_id()
l=[entity_id]
parent=self.get_parent()
while not (parent is None):
entity_id=parent.get_id()
l.append(entity_id)
parent=parent.get_parent()
l.reverse()
self.full_id=tuple(l)
return self.full_id
class DisorderedEntityWrapper:
"""
This class is a simple wrapper class that groups a number of equivalent
Entities and forwards all method calls to one of them (the currently selected
object). DisorderedResidue and DisorderedAtom are subclasses of this class.
E.g.: A DisorderedAtom object contains a number of Atom objects,
where each Atom object represents a specific position of a disordered
atom in the structure.
"""
def __init__(self, id):
self.id=id
self.child_dict={}
self.selected_child=None
self.parent=None
# Special methods
def __getattr__(self, method):
"Forward the method call to the selected child."
if not hasattr(self, 'selected_child'):
# Avoid problems with pickling
# Unpickling goes into infinite loop!
raise AttributeError
return getattr(self.selected_child, method)
def __getitem__(self, id):
"Return the child with the given id."
return self.selected_child[id]
# XXX Why doesn't this forward to selected_child?
# (NB: setitem was here before getitem, iter, len, sub)
def __setitem__(self, id, child):
"Add a child, associated with a certain id."
self.child_dict[id]=child
def __iter__(self):
"Return the number of children."
return iter(self.selected_child)
def __len__(self):
"Return the number of children."
return len(self.selected_child)
def __sub__(self, other):
"""Subtraction with another object."""
return self.selected_child - other
# Public methods
def get_id(self):
"Return the id."
return self.id
def disordered_has_id(self, id):
"""True if there is an object present associated with this id."""
return (id in self.child_dict)
def detach_parent(self):
"Detach the parent"
self.parent=None
for child in self.disordered_get_list():
child.detach_parent()
def get_parent(self):
"Return parent."
return self.parent
def set_parent(self, parent):
"Set the parent for the object and its children."
self.parent=parent
for child in self.disordered_get_list():
child.set_parent(parent)
def disordered_select(self, id):
"""Select the object with given id as the currently active object.
Uncaught method calls are forwarded to the selected child object.
"""
self.selected_child=self.child_dict[id]
def disordered_add(self, child):
"This is implemented by DisorderedAtom and DisorderedResidue."
raise NotImplementedError
def is_disordered(self):
"""
Return 2, indicating that this Entity is a collection of Entities.
"""
return 2
def disordered_get_id_list(self):
"Return a list of id's."
        # sort id list alphabetically (works on both Python 2 and 3)
        l = sorted(self.child_dict.keys())
        return l
def disordered_get(self, id=None):
"""Get the child object associated with id.
If id is None, the currently selected child is returned.
"""
        if id is None:
return self.selected_child
return self.child_dict[id]
def disordered_get_list(self):
"Return list of children."
        return list(self.child_dict.values())
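# Illustrative container usage (a sketch, not from the Biopython docs):
#   parent = Entity('model'); child = Entity('chainA')
#   parent.add(child)              # registers child in child_list/child_dict
#   assert parent['chainA'] is child and len(parent) == 1
#   parent.detach_child('chainA')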
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/PDB/Entity.py
|
Python
|
gpl-2.0
| 7,284
|
[
"Biopython"
] |
618980f980ae08f2c37c34f46b9a242f5b674991ef7a57255723cd5f0f16c54b
|
# Orca
#
# Copyright 2005-2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom script for StarOffice and OpenOffice."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2009 Sun Microsystems Inc."
__license__ = "LGPL"
import pyatspi
import orca.braille as braille
import orca.braille_generator as braille_generator
import orca.object_properties as object_properties
import orca.settings_manager as settings_manager
_settingsManager = settings_manager.getManager()
class BrailleGenerator(braille_generator.BrailleGenerator):
# pylint: disable-msg=W0142
def __init__(self, script):
braille_generator.BrailleGenerator.__init__(self, script)
def _generateRoleName(self, obj, **args):
result = []
role = args.get('role', obj.getRole())
if role != pyatspi.ROLE_DOCUMENT_FRAME:
result.extend(braille_generator.BrailleGenerator._generateRoleName(
self, obj, **args))
return result
def _generateRowHeader(self, obj, **args):
"""Returns an array of strings that represent the row header for an
object that is in a table, if it exists. Otherwise, an empty
array is returned. Overridden here so that we can get the
dynamic row header(s).
"""
newOnly = args.get('newOnly', False)
rowHeader, columnHeader = \
self._script.utilities.getDynamicHeadersForCell(obj, newOnly)
if not rowHeader:
return []
text = self._script.utilities.displayedText(rowHeader)
if text:
return [text]
return []
def _generateColumnHeader(self, obj, **args):
"""Returns an array of strings that represent the column header for an
object that is in a table, if it exists. Otherwise, an empty
array is returned. Overridden here so that we can get the
dynamic column header(s).
"""
newOnly = args.get('newOnly', False)
rowHeader, columnHeader = \
self._script.utilities.getDynamicHeadersForCell(obj, newOnly)
if not columnHeader:
return []
text = self._script.utilities.displayedText(columnHeader)
if text:
return [text]
return []
def _generateSpreadSheetCell(self, obj, **args):
try:
objectText = self._script.utilities.substring(obj, 0, -1)
cellName = self._script.utilities.spreadSheetCellName(obj)
        except Exception:
return []
return [braille.Component(obj, " ".join((objectText, cellName)))]
def _generateRealTableCell(self, obj, **args):
"""Get the speech for a table cell. If this isn't inside a
spread sheet, just return the utterances returned by the default
table cell speech handler.
Arguments:
- obj: the table cell
Returns a list of utterances to be spoken for the object.
"""
result = []
if self._script.utilities.isSpreadSheetCell(obj):
result.extend(self._generateSpreadSheetCell(obj, **args))
else:
# Check to see how many children this table cell has. If it's
# just one (or none), then pass it on to the superclass to be
# processed.
#
# If it's more than one, then get the speech for each child,
# and call this method again.
#
if obj.childCount <= 1:
result.extend(braille_generator.BrailleGenerator.\
_generateRealTableCell(self, obj, **args))
else:
for child in obj:
cellResult = self._generateRealTableCell(child, **args)
if cellResult and result and self._mode == 'braille':
result.append(braille.Region(
object_properties.TABLE_CELL_DELIMITER_BRAILLE))
result.extend(cellResult)
return result
def _generateTableCellRow(self, obj, **args):
"""Get the speech for a table cell row or a single table cell
if settings.readTableCellRow is False. If this isn't inside a
spread sheet, just return the utterances returned by the default
table cell speech handler.
Arguments:
- obj: the table cell
Returns a list of utterances to be spoken for the object.
"""
result = []
if self._script.utilities.isSpreadSheetCell(obj):
# Adding in a check here to make sure that the parent is a
# valid table. It's possible that the parent could be a
# table cell too (see bug #351501).
#
parent = obj.parent
parentTable = parent.queryTable()
if _settingsManager.getSetting('readTableCellRow') and parentTable:
index = self._script.utilities.cellIndex(obj)
row = parentTable.getRowAtIndex(index)
column = parentTable.getColumnAtIndex(index)
# This is an indication of whether we should present all the
# table cells (the user has moved focus up or down a row),
# or just the current one (focus has moved left or right in
# the same row).
#
presentAll = True
if "lastRow" in self._script.pointOfReference and \
"lastColumn" in self._script.pointOfReference:
pointOfReference = self._script.pointOfReference
presentAll = \
(self._mode == 'braille') \
or ((pointOfReference["lastRow"] != row) \
or ((row == 0 or row == parentTable.nRows-1) \
and pointOfReference["lastColumn"] == column))
if presentAll:
[startIndex, endIndex] = \
self._script.utilities.getTableRowRange(obj)
for i in range(startIndex, endIndex):
cell = parentTable.getAccessibleAt(row, i)
showing = cell.getState().contains( \
pyatspi.STATE_SHOWING)
if showing:
cellResult = self._generateRealTableCell(cell,
**args)
if cellResult and result \
and self._mode == 'braille':
result.append(braille.Region(
object_properties.TABLE_CELL_DELIMITER_BRAILLE))
result.extend(cellResult)
else:
result.extend(self._generateRealTableCell(obj, **args))
else:
result.extend(self._generateRealTableCell(obj, **args))
else:
result.extend(
braille_generator.BrailleGenerator._generateTableCellRow(
self, obj, **args))
return result
def _generateChildTab(self, obj, **args):
"""If we are in the slide presentation scroll pane, also announce the
current page tab. See bug #538056 for more details.
"""
result = []
rolesList = [pyatspi.ROLE_SCROLL_PANE, \
pyatspi.ROLE_PANEL, \
pyatspi.ROLE_PANEL, \
pyatspi.ROLE_ROOT_PANE, \
pyatspi.ROLE_FRAME, \
pyatspi.ROLE_APPLICATION]
if self._script.utilities.hasMatchingHierarchy(obj, rolesList):
for child in obj.parent:
if child.getRole() == pyatspi.ROLE_PAGE_TAB_LIST:
for tab in child:
eventState = tab.getState()
if eventState.contains(pyatspi.STATE_SELECTED):
args['role'] = tab.getRole()
result.extend(self.generate(tab, **args))
return result
def generateBraille(self, obj, **args):
result = []
args['useDefaultFormatting'] = \
((obj.getRole() == pyatspi.ROLE_LIST) \
and (not obj.getState().contains(pyatspi.STATE_FOCUSABLE)))
result.extend(braille_generator.BrailleGenerator.\
generateBraille(self, obj, **args))
del args['useDefaultFormatting']
return result
|
ruibarreira/linuxtrail
|
usr/lib/python3/dist-packages/orca/scripts/apps/soffice/braille_generator.py
|
Python
|
gpl-3.0
| 9,278
|
[
"ORCA"
] |
85ce39e3cac0a79ee72e97dc9bff382fd3cdd6eb6aaa95cccf224dabdf62324f
|
# $Id$
#
# Copyright (c) 2001-2006, Greg Landrum and Rational Discovery LLC,
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" unit testing code for the logger
"""
import unittest
import Logger
import re
class Foo:
""" a simple class
"""
def __init__(self,aVal):
self.a = aVal
def method1(self,a,b,c='foo'):
return 'method1'
def method2(self):
return 'method2'
def test1(self):
return 'test1'
def test2(self):
return 'test2'
class TestCase(unittest.TestCase):
def testBasic(self):
l = Logger.Logger(Foo,7)
try:
l.method1(1,2)
l.method1(1,2,c='grm')
l.method2()
l.method1(7,6,'pizza')
l.b = 3
l.method2()
except:
ok = 0
else:
ok = 1
assert ok,'basic calls failed'
assert l.b == 3, '__setattr__ failed'
res = l._LoggerGetLog()
assert len(res) == 6, 'length of log (%d) wrong'%(len(res))
def testPlayback(self):
l = Logger.Logger(Foo,7)
try:
l.method1(1,2)
l.method1(1,2,c='grm')
l.method2()
l.method1(7,6,'pizza')
l.b = 3
l.method2()
except:
ok = 0
else:
ok = 1
assert ok,'basic calls failed'
f = Foo(7)
replay = Logger.replay(l._LoggerGetLog(),f)
assert replay==['method1','method1','method2','method1',3,'method2'],\
'replay results (%s) wrong'%(str(replay))
assert f.b == 3, '__setattr__ failed'
def testFlush(self):
l = Logger.Logger(Foo,7,loggerFlushCommand='method2')
try:
l.method1(1,2)
l.method1(1,2,c='grm')
l.method1(7,6,'pizza')
l.b = 3
except:
ok = 0
else:
ok = 1
assert ok,'basic calls failed'
res = l._LoggerGetLog()
assert len(res)==4, 'length of log (%d) wrong'%(len(res))
l.method2()
res = l._LoggerGetLog()
assert len(res)==0, 'length of log (%d) wrong'%(len(res))
def testIgnore(self):
e1 = re.compile('test*')
l = Logger.Logger(Foo,7,loggerIgnore=[e1,'method2'])
try:
l.method1(1,2)
l.method2()
l.test1()
l.test2()
l.method1(1,2,c='grm')
l.method1(7,6,'pizza')
l.b = 3
except:
ok = 0
else:
ok = 1
assert ok,'basic calls failed'
res = l._LoggerGetLog()
assert len(res)==4, 'length of log (%d) wrong'%(len(res))
def TestSuite():
suite = unittest.TestSuite()
suite.addTest(TestCase('testBasic'))
suite.addTest(TestCase('testPlayback'))
suite.addTest(TestCase('testFlush'))
suite.addTest(TestCase('testIgnore'))
return suite
if __name__ == '__main__':
suite = TestSuite()
unittest.TextTestRunner().run(suite)
|
rdkit/rdkit-orig
|
rdkit/Logger/UnitTestLogger.py
|
Python
|
bsd-3-clause
| 2,842
|
[
"RDKit"
] |
3ab098c0edcd09a4c638fa9151e38212db0b4ab1cd13799c2f235b60b4f2f057
|
#!/usr/bin/env python3
"""
script for fixing scaffolding errors by:
1. identifying errors based on stringent read mapping (stringent mapping for both reads in a pair)
2. collecting mapped reads (stringent mapping for one read in pair)
3. re-assembling collected reads
4. merging the new assembly with the old assembly
5. doing a final check of the re-assembled scaffolds
(this could be made iterative, or combined with a re-assembly step using the final assembly and
reads mapping to the final assembly - stringent mapping for one read in the pair, ideally)
"""
import os
import re
import sys
import itertools
import subprocess
from glob import glob as glob
# ctb
import ctbBio.mapped as map_tool
from ctbBio.rc import reverse_complement as rc
from ctbBio.fastq_split import split as fastq_split
from ctbBio.fix_fasta import fix_fasta as fix_fasta
from ctbBio.fasta import iterate_fasta as parse_fasta
from ctbRA.assemble import velvet as velvet
def fastq2fasta(fastq, paired = True):
"""
convert fastq file to fasta
"""
c = itertools.cycle([1, 2, 3, 4])
p = itertools.cycle([1, 2])
for line in open(fastq):
n = next(c)
if n == 1:
if paired is True:
s = ['%s/%s' % (line.strip().replace('@', '>'), next(p))]
else:
s = [line.strip().replace('@', '>')]
elif n == 2:
s.append(line.strip())
yield s
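# Illustrative behaviour (assuming standard 4-line fastq records): the record
#   '@read1\nACGT\n+\nIIII'
# yields ['>read1/1', 'ACGT'] when paired is True.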
def run_bowtie(assembly, sam, pr, pr_split, sr, threads, multiple, bt_dir):
"""
    map reads to assembly with bowtie2
"""
if os.path.exists(sam) is False: # if sam file does not exist, run bowtie2
db_check = '%s/%s.1.bt2' % (bt_dir, assembly)
sr_command = pr_command = matches_command = ''
if os.path.exists(db_check) is False: # check for db file
p = subprocess.Popen('bowtie2-build -q %s %s/%s' \
% (assembly, bt_dir, assembly.rsplit('/', 1)[-1]), shell = True)
p.communicate()
if sr is not False:
sr_command = '-U %s' % (sr)
if pr_split is not False:
r1, r2 = pr_split
pr_command = '-1 %s -2 %s' % (r1, r2)
elif pr is not False:
base = '%s/%s' % (bt_dir, pr.rsplit('/', 1)[-1].rsplit('.', 1)[0])
r1 = '%s.R1.fastq' % (base)
r2 = '%s.R2.fastq' % (base)
pr_split = [r1, r2]
if os.path.exists(r1) is False:
fastq_split(open(pr), base)
pr_command = '-1 %s -2 %s' % (r1, r2)
if multiple is True: # should reads be allowed to map to more than one position or contig?
matches_command = '-a '
# print 'bowtie2 --very-fast --reorder --quiet %s -p %s -x %s/%s %s %s | shrinksam -k %s' \
# % (matches_command, threads, bt_dir, assembly.rsplit('/', 1)[-1], pr_command, sr_command, sam)
p = subprocess.Popen('\
bowtie2 --very-fast --reorder --quiet %s -p %s -x %s/%s %s %s | shrinksam -k %s' \
% (matches_command, threads, bt_dir, assembly.rsplit('/', 1)[-1], pr_command, sr_command, sam)\
, shell = True)
p.communicate()
return sam, pr_split
def map_reads(assembly, scaffolds, pr, threads, multiple, bt_dir = 'bt2', pr_split = False, sr = False):
"""
    map reads to assembly with bowtie2
"""
if '/' in assembly:
bt_dir = '%s/%s' % (assembly.rsplit('/', 1)[0], bt_dir)
assembly_dir, assembly_name = assembly.rsplit('/', 1)
else:
assembly_dir, assembly_name = './', assembly
os.system('mkdir -p %s' % (bt_dir))
sam = '%s/%s.bt.sam' % (bt_dir, assembly_name.rsplit('.', 1)[0])
return run_bowtie(assembly, sam, pr, pr_split, sr, threads, multiple, bt_dir) # run bowtie, return sam file
def check_cov(c, cov_thresh):
"""
check for both read coverage and passing overlap test
c = [coverage, overlap = True or False]
"""
if c[0] >= cov_thresh and c[1] is True:
return True
return False
def errors_from_cov(s2c, cov_thresh):
"""
identify errors from low coverage region
"""
errors = {} # errors[scaffold] = [position of errors]
for scaf, cov in list(s2c.items()):
errors[scaf] = []
prev = False
for pos, c in enumerate(cov):
if check_cov(c, cov_thresh) is False:
if prev is True:
continue
prev = True
errors[scaf].append(pos)
else:
prev = False
return errors
def mm_positions_from_md(sam, read_length):
"""
    get mismatch positions from the MD tag of a sam entry
"""
positions = False
md = [i.rsplit(':', 1)[1] for i in sam if i.startswith('MD:Z:')][0]
if '^' in md or '+' in md: # do not count reads with indels
return True
if md == str(read_length - 1):
return positions
last = len([i for i in re.split('[0-9]', md) if len(i) > 0])
for i, pos in enumerate([i for i in re.split('[A-Z]', md) if i.isdigit()]):
if i >= last:
continue
pos = int(pos) + 1
if positions is False:
positions = [pos]
else:
positions.append(positions[-1] + pos)
if positions is False:
return True
return set(positions)
def check_mm(sam, window, read_length):
"""
make sure mismatches are not in window at beginning or end of read
if mismatches are not in the beginning or end of the read, return False
"""
mm = map_tool.count_mismatches(sam)
if mm is False:
return True
if mm == 0:
return False
mm_positions = mm_positions_from_md(sam, read_length)
if mm_positions is False:
return False
elif mm_positions is True:
return True
for pos in mm_positions:
if pos <= window or pos >= (read_length - window):
return True
return False
def add_coverage(scaffold, scaffolds, overlap, s2c, sam, window):
"""
add coverage to scaffolds for read region defined by overlap
cov_info = [coverage, overlap_coverage]
"""
read_length = overlap[1] - overlap[0] + 1
mm = check_mm(sam, window, read_length)
for i in range(overlap[0] - 1, overlap[1]):
if scaffold not in s2c:
return s2c
try:
s2c[scaffold][i][0] += 1
if i < overlap[1] - window and mm is False:
s2c[scaffold][i][1] = True
except IndexError:
break
return s2c
def id_errors(pairs, header, assembly, scaffolds, cov_thresh, mismatches, allow_orphan = False, allow_orphan_ends = False, window = 3, orphan_window = 1000):
"""
identify regions with zero coverage by stringently mapped paired reads
    * pairs[read] = [bit, mate, mapping[scaffold] = [map, map2, ...], fastq]
* map = [overlap, mismatches, sam_info]
* sam_info = all sam lines except read + quality
"""
# open sam file for writing
out = open('%s.both.sam' % (assembly.rsplit('.', 1)[0]), 'w')
for line in header:
print(line, file=out)
# s2c[scaffold] = [[coverage per position, # reads connecting base to the next base], [p2, pn2]]
s2c = {id: [[0, False] for i in range(0, info[1])] for id, info in list(scaffolds.items())}
# filter reads
for read in list(pairs.values()):
bit, mate, maps, fastq = read
if mate not in pairs:
continue
mate = pairs[mate]
for scaffold, mappings in list(maps.items()):
for mapping in mappings:
overlap, mm, sam_info = mapping
if scaffold not in mate[2]:
continue
for mate_map in mate[2][scaffold]:
mate_overlap, mate_mm, mate_sam = mate_map
if mm is False and mate_mm is False: # make sure at least one pair mapped to scaffold
continue
if (allow_orphan is False and allow_orphan_ends is False) and (mm is False or mate_mm is False): # make sure both pairs mapped
continue
elif allow_orphan_ends is True and (mm is False or mate_mm is False):
pos = [sam_info[0][3], mate_sam[0][3]]
if min(pos) > orphan_window and max(pos) < scaffolds[scaffold][1] - orphan_window:
continue
if mm > mismatches or mate_mm > mismatches: # both pairs must pass mismatches
# if mm + mate_mm > mismatches: # total number of mismatches must be less than mismatches
continue
sam = mapping[2][0] + [read[3][1]] + [read[3][3]] + mapping[2][1]
print('\t'.join(sam), file=out)
s2c = add_coverage(scaffold, scaffolds, overlap, s2c, sam, window)
errors = errors_from_cov(s2c, cov_thresh)
return s2c, errors
def define_windows(scaffolds, s2errors, window, combine_windows):
"""
determine what window to use for collecting reads
if windows overlap, combine them
"""
# define windows
s2windows = {} # s2windows[scaffold][error] = [window]
for scaffold, errors in list(s2errors.items()):
if len(errors) == 0:
continue
s2windows[scaffold] = {}
for error in errors:
start = (error - int(window/2))
if start < 0:
start = 0
stop = window
if stop > scaffolds[scaffold][1]:
stop = scaffolds[scaffold][1]
else:
stop = (error + int(window/2))
if stop > scaffolds[scaffold][1]:
stop = scaffolds[scaffold][1]
start = stop - window
if start < 0:
start = 0
s2windows[scaffold][error] = [start, stop]
if combine_windows is False:
return s2windows
# combine overlapping windows
updated = {}
for scaffold in s2windows:
updated[scaffold] = {}
errors = sorted(list(s2windows[scaffold].items()), key = lambda x: x[0])
if len(errors) > 1:
i = 1
for error in errors[1:]:
prev = errors[i - 1]
if prev[1][1] >= error[1][0]:
pos = '_'.join([str(prev[0]), str(error[0])])
start, stop = prev[1][0], error[1][1]
merged = (pos, [start, stop])
del errors[i - 1]
errors[i - 1] = merged
else:
i += 1
for error in errors:
updated[scaffold][error[0]] = error[1]
return updated
def get_overlap(a, b):
return max(0, min(a[1], b[1]) - max(a[0], b[0]))
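# Illustrative: get_overlap((0, 10), (5, 20)) -> 5; disjoint spans give 0.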
def check_overlap(overlap, window):
"""
check to see if overlap is within window
"""
if overlap is False:
return False
if get_overlap(overlap, window) > 0:
return True
return False
def map2window(scaffold, s2windows, s2errors, overlap, m_overlap):
"""
determine if reads maps within window
return errors that reads map to, or False
"""
matches = []
if scaffold not in s2windows:
return False
errors = s2windows[scaffold]
for error, window in list(errors.items()):
if check_overlap(overlap, window) is True or check_overlap(m_overlap, window) is True:
matches.append(error)
if len(matches) == 0:
return False
return matches
def collect_reads(pairs, assembly, scaffolds, mismatches, prefix, s2errors = False, window = False, combine_windows = False):
"""
collect reads mapped to scaffold within window, or for the entire contig
    - only one read in the pair has to pass the mismatch criteria
    * pairs[read] = [bit, mate, mapping[scaffold] = map, fastq]
map = [overlap, mismatches, sam_info]
sam_info = all sam lines except read + quality
* reads[scaffold][error] = [[sequences-fastq], filename]
"""
# define windows
if window is not False:
s2windows = define_windows(scaffolds, s2errors, window, combine_windows)
else:
s2windows = False
# make files for reads
reads = {} # reads[scaffold][error] = pe
if window is not False:
for scaffold, errors in list(s2windows.items()):
if len(errors) == 0:
continue
reads[scaffold] = {}
for error in errors:
dir = '%s/s_%s_e_%s' % (prefix, scaffold, error)
os.system('mkdir -p %s' % (dir))
reads[scaffold][error] = [{}, '%s/reads.pe.fastq' % (dir)]
else:
reads[False] = {}
reads[False][False] = [{}, '%s.one.pe.fastq' % (assembly.rsplit('.', 1)[0])]
# get reads
for id, read in list(pairs.items()):
id, num = id.rsplit('_', 1)
bit, mate_id, maps, fastq = read
if mate_id not in pairs:
continue
mate = pairs[mate_id]
for scaffold, mappings in list(maps.items()):
for mapping in mappings:
overlap, mm, sam_info = mapping
m_fastq, m_num = mate[3], mate_id.rsplit('_', 1)[1]
if scaffold not in mate[2]:
m_info = [[False, False, False]]
else:
m_info = [map for map in mate[2][scaffold]]
for map in m_info:
m_overlap, m_mm, m_sam_info = map
if mismatches is not False and mm > mismatches and m_mm > mismatches:
# skip if both reads have too many mismatches
continue
if (mm is False and m_mm > mismatches) or (m_mm is False and mm > mismatches):
continue
if mm is False and m_mm is False:
continue
if window is False:
errors = [False]
scaffold = False
else:
errors = map2window(scaffold, s2windows, s2errors, overlap, m_overlap)
if window is not False and errors is False:
continue
for error in errors:
fq = [None, None]
fq[int(num) - 1] = '\n'.join(fastq)
fq[int(m_num) - 1] = '\n'.join(m_fastq)
reads[scaffold][error][0][id] = '\n'.join(fq)
# save reads to files
for scaffold in reads:
for error in reads[scaffold]:
seqs, name = reads[scaffold][error]
out = open(name, 'w')
for seq in list(seqs.values()):
print(seq, file=out)
out.close()
reads[scaffold][error] = out.name
return reads
def break_by_coverage(assembled, s2c, cov_thresh, ignore_ends = False):
"""
look through re-assembled contigs and break them where stringent coverage is low
"""
fragments = [] # fragments = [[[len, header], [sequence]], ...]
if ignore_ends is False:
for id, seq in list(assembled.items()):
sequence = []
for i, base in enumerate(s2c[id]):
if check_cov(base, cov_thresh) is True:
if sequence == []:
start = i
sequence.append(seq[2][i])
else:
if sequence != []:
fragments.append([[len(sequence), '>%s_f:%s' % (id, start)], ''.join(sequence)])
sequence = []
if sequence != []:
fragments.append([[len(sequence), '>%s_f:%s' % (id, start)], ''.join(sequence)])
else:
for id, seq in list(assembled.items()):
sequence = []
for i, base in enumerate(s2c[id]):
if check_cov(base, cov_thresh) is True or (i < 100 or i > (seq[1] - 100)):
if sequence == []:
start = i
sequence.append(seq[2][i])
else:
if sequence != []:
fragments.append([[len(sequence), '>%s_f:%s' % (id, start)], ''.join(sequence)])
sequence = []
if sequence != []:
fragments.append([[len(sequence), '>%s_f:%s' % (id, start)], ''.join(sequence)])
fragments.sort(key = lambda x: x[0][0], reverse = True)
return fragments
def re_assemble_velvet(pr, prefix, scaffold, error, scaffolding, min_contig):
"""
re-assemble reads using velvet
"""
out = '%s/s_%s_e_%s' % (prefix, scaffold, error)
velvet(paired = [pr], out = out, scaffolding = scaffolding, \
silent = True, min_contig = min_contig, kmer_min = 21, kmer_max = 71, kmer_increase = 10)
re_assembled_seqs = {}
assembled_fasta = open('%s/velvet_s-%s.e_%s.fa' % (out, scaffold, error), 'w')
for fasta in glob('%s/*.fasta' % (out)):
for seq in parse_fasta(fasta):
if seq[0] == []:
continue
re_assembled_seqs[seq[0].split('>')[1]] = [seq[0], len(seq[1]), seq[1]]
print('\n'.join(seq), file=assembled_fasta)
assembled_fasta.close()
return assembled_fasta.name, re_assembled_seqs
def re_assemble_shorty(pr, prefix, scaffold, error, min_contig):
"""
re-assemble reads using shorty
"""
out = '%s/s_%s_e_%s' % (prefix, scaffold, error) # shorty
os.system('mkdir -p %s' % out) # shorty
assembled_fasta = '%s/shorty_s-%s.e-%s.fa' % (out, scaffold, error) # shorty
p = subprocess.Popen('shorty -s %s -q %s | fix_fasta.py - > %s' \
% (min_contig, pr, assembled_fasta), shell = True)
p.communicate()
re_assembled_seqs = {seq[0].split('>')[1]: [seq[0], len(seq[1]), seq[1]] for seq in parse_fasta(assembled_fasta)}
return assembled_fasta, re_assembled_seqs
def re_assemble_minimo(pr, prefix, scaffold, error, min_contig):
"""
    re-assemble reads using Minimo
"""
current = os.getcwd()
out = '%s/s_%s_e_%s' % (prefix, scaffold, error)
os.system('mkdir -p %s' % out)
assembled_fasta = '%s/minimo_s_%s.e_%s.fa' % (out, scaffold, error)
if os.path.exists(assembled_fasta):
re_assembled_seqs = {seq[0].split('>')[1]: [seq[0], len(seq[1]), seq[1]] for seq in parse_fasta(assembled_fasta) if seq[0] is not list}
return assembled_fasta, re_assembled_seqs
fasta = open('%s.fa' % (pr.rsplit('.', 2)[0]), 'w')
for seq in fastq2fasta(pr, True):
print('\n'.join(seq), file=fasta)
fasta.close()
p = subprocess.Popen('cd %s; Minimo reads.fa \
-D MIN_LEN=50 -D MIN_IDENT=98 -D FASTA_EXP=1 -D OUT_PREFIX=minimo -D ACE_EXP=0 > minimo.log; \
fix_fasta.py minimo-contigs.fa | fasta_length.py - %s > %s; cd %s' \
% (out, min_contig, assembled_fasta.rsplit('/', 1)[1], current), shell = True)
p.communicate()
re_assembled_seqs = {seq[0].split('>')[1]: [seq[0], len(seq[1]), seq[1]] for seq in parse_fasta(assembled_fasta) if type(seq[0]) is str}
return assembled_fasta, re_assembled_seqs
def idba_ud_seqs(fasta_name, idba_dir):
"""
get fasta file from idba_ud assembly directory, save to dictionary
"""
out = '%s/scaffold.fa' % (idba_dir)
if os.path.exists(out) is False:
out = '%s/contig.fa' % (idba_dir)
re_assembled_seqs = \
{seq[0].split('>')[1].replace(' ', '_'): [seq[0].replace(' ', '_'), len(seq[1]), seq[1]] \
for seq in parse_fasta(out) if type(seq[0]) is str}
os.system('cat %s | tr " " "_" > %s' % (out, fasta_name))
return re_assembled_seqs
def re_assemble_idba_ud(pr, prefix, scaffold, error, min_contig, threads):
"""
re-assemble reads using idba_ud
"""
current = os.getcwd()
out = '%s/s_%s_e_%s' % (prefix, scaffold, error)
idba_out = '%s/%s' % (out, 'idba_ud')
os.system('mkdir -p %s' % out)
assembled_fasta = '%s/idba_ud_s_%s.e_%s.fa' % (out, scaffold, error)
if os.path.exists(idba_out):
re_assembled_seqs = idba_ud_seqs(assembled_fasta, idba_out)
return assembled_fasta, re_assembled_seqs
fasta = open('%s.fa' % (pr.rsplit('.', 2)[0]), 'w')
for seq in fastq2fasta(pr, True):
print('\n'.join(seq), file=fasta)
fasta.close()
p = subprocess.Popen('idba_ud --step 10 --pre_correction --similar 0.98 -r %s -o %s --num_threads %s --min_contig %s \
>> idba_ud.log 2>> idba_ud.log' \
% (fasta.name, idba_out, threads, min_contig), shell = True)
out, error = p.communicate()
return assembled_fasta, idba_ud_seqs(assembled_fasta, idba_out)
def re_assemble_fragments(reads, prefix, threads, cov_thresh, mismatches, multiple, scaffolding = True, assembler = 'velvet', min_contig = '200'):
"""
re-assemble misassembled regions, return fragments (sorted longest to shortest) representative of re-assembled
regions with coverage by stringently mapped reads
    * assembler == 'velvet', 'shorty', 'minimo', or 'idba_ud'
"""
re_assembled = {}
for scaffold, errors in list(reads.items()):
re_assembled[scaffold] = {}
for error, pr in list(errors.items()):
if assembler == 'velvet':
assembled_fasta, re_assembled_seqs = \
re_assemble_velvet(pr, prefix, scaffold, error, scaffolding, min_contig)
elif assembler == 'shorty':
assembled_fasta, re_assembled_seqs = \
re_assemble_shorty(pr, prefix, scaffold, error, min_contig)
elif assembler == 'minimo':
assembled_fasta, re_assembled_seqs = \
re_assemble_minimo(pr, prefix, scaffold, error, min_contig)
elif assembler == 'idba_ud':
assembled_fasta, re_assembled_seqs = \
re_assemble_idba_ud(pr, prefix, scaffold, error, min_contig, threads)
else:
print('# please specify valid assembler', file=sys.stderr)
exit()
if len(re_assembled_seqs) == 0:
re_assembled[scaffold][error] = False
continue
# map reads to re-assembled scaffolds and identify zero coverage regions
mapping, pr_split = map_reads(assembled_fasta, re_assembled_seqs, pr, threads, multiple)
pairs, header = parse_mapping(mapping)
s2c, s2errors = id_errors(pairs, header, assembled_fasta, re_assembled_seqs, cov_thresh, mismatches, allow_orphan = True)
fragments = break_by_coverage(re_assembled_seqs, s2c, cov_thresh, ignore_ends = False)
f_out = open('%s/s_%s_e_%s/fragments.fa' % (prefix, scaffold, error), 'w')
for f in fragments:
print('\n'.join([f[0][1], f[1]]), file=f_out)
f_out.close()
re_assembled[scaffold][error] = fragments
return re_assembled
def patch_start(orig, cov, error, n_error, patches, merged, k, cov_thresh, buffer):
"""
use re-assembled fragments to extend scaffold from start
"""
    length = 0
    trimmed = False
    start = []  # bases collected from the first well-covered stretch
for i, c in enumerate(cov):
if length < k + buffer:
if check_cov(c, cov_thresh) is False:
start = []
else:
start.append(orig[i])
length = len(start)
else:
start = ''.join(start).upper()
if n_error is False:
trimmed = orig[(i - k - buffer):]
else:
trimmed = orig[(i - k - buffer):n_error]
break
if trimmed is False: # no region of original sequence passed cov. threshold
return False
attempts = []
for patch in patches:
patch = patch[1].upper()
i = patch.find(start)
if i == -1:
patch = rc(['', patch])[1]
i = patch.find(start)
if i != -1:
patch = patch[0:i]
if len(patch) > 0:
attempts.append([len(patch), patch])
if len(attempts) > 0:
best = sorted(attempts, key = lambda x: x[0], reverse = True)[0][1]
return [best, trimmed]
    else:
        # could not extend the scaffold; return only the trimmed original
        return [trimmed]
def patch_end(orig, cov, error, n_error, patches, merged, k, origA, origB, start, stop, buffer):
"""
use re-assembled fragments to extend scaffold from end
"""
attempts = []
for patch in patches:
patch = patch[1].upper()
i = patch.find(start)
if i == -1:
patch = rc(['', patch])[1]
i = patch.find(start)
if i != -1:
patch = patch[(i + k + buffer):]
attempts.append([len(patch), patch])
if len(attempts) > 0:
best = sorted(attempts, key = lambda x: x[0], reverse = True)[0][1]
return best
    else:
        # could not extend the scaffold past this point
        return False
def patch_middle(orig, cov, error, n_error, patches, merged, k, origA, origB, origM, start, stop, buffer):
"""
use re-assembled fragments to patch mis-assembly in middle of scaffold
"""
attemptsA = []
for patch in patches:
patch = patch[1].upper()
p_start = patch.find(start)
if p_start == -1:
patch = rc(['', patch])[1]
p_start = patch.find(start)
if p_start == -1:
continue
p = patch[(p_start + k + buffer):]
attemptsA.append([len(p), p])
p_stop = patch.find(stop)
if p_stop == -1:
patch = rc(['', patch])[1]
p_stop = patch.find(stop)
if p_stop == -1:
continue
p = patch[(p_start + k + buffer):(p_stop)]
if len(p) == 0:
continue
return [0, p]
attemptsB = []
for patch in patches: # extend origB if possible
patch = patch[1].upper()
p_start = patch.find(start)
if p_start == -1:
patch = rc(['', patch])[1]
p_start = patch.find(start)
if p_start != -1:
continue
p_stop = patch.find(stop)
if p_stop == -1:
patch = rc(['', patch])[1]
p_stop = patch.find(stop)
if p_stop == -1:
continue
p = patch[:p_stop]
attemptsB.append([len(p), p])
# what if only the start kmer could be found?
if len(attemptsA) > 0:
bestA = sorted(attemptsA, key = lambda x: x[0], reverse = True)[0][1]
if len(bestA) > 20:
if len(attemptsB) > 0:
bestB = sorted(attemptsB, key = lambda x: x[0], reverse = True)[0][1]
if len(bestB) > 20:
return [12, [bestA, bestB]]
return [1, bestA]
# what if only the stop kmer could be found?
if len(attemptsB) > 0:
bestB = sorted(attemptsB, key = lambda x: x[0], reverse = True)[0][1]
if len(bestB) > 20:
return [2, bestB]
return False # could not find start and stop in any fragment
def find_start_stop_error(orig, cov, error, n_error, patches, merged, k, cov_thresh, buffer):
"""
find regions of length k flanking region with error
"""
start = orig[(error - k - buffer):(error - buffer)].upper()
origA = orig[:error].upper()
    length = 0
    stop = []  # first well-covered stretch after the error
for i, c in enumerate(cov[error:], error): # find stop sequence
if length < k + buffer:
if check_cov(c, cov_thresh) is False:
stop = []
else:
stop.append(orig[i])
length = len(stop)
else:
break
stop = ''.join(stop[buffer:]).upper()
if n_error is False:
origB = orig[(i - k):]
else:
origB = orig[(i - k):n_error]
origM = orig[error:(i - k)] # mismatched sequence
return origA, origB, origM, start, stop
def check_overlap_seqs(mA, mB, k):
"""
see if extensions overlap with one another
"""
kmer = mA[(len(mA) - k):]
start = mB.find(kmer)
if start == -1:
return False
return mA + mB[(start + k):]
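# a quick sanity check of check_overlap_seqs with made-up sequences:
#   check_overlap_seqs('ACGTTT', 'GTTTAACC', 4)
# the last 4-mer of mA ('GTTT') is found at position 0 of mB, so the
# merged sequence is 'ACGTTT' + mB[4:] = 'ACGTTTAACC'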
def patch_contig(orig, cov, error, n_error, patches, merged, scaffold, cov_thresh, k = 10, buffer = 5):
"""
patch original assembly with re-assembled fragment
buffer = extra space before/after low coverage region
"""
# check to see if error is at the beginning of scaffold, if so - extend it
if error == 0 and patches is not False:
merged = patch_start(orig, cov, error, n_error, patches, merged, k, cov_thresh, buffer)
return merged
origA, origB, origM, start, stop = find_start_stop_error(orig, cov, error, n_error, patches, merged, k, cov_thresh, buffer)
if merged == []:
merged = [origA] # if not, origA is already there
# check to make sure re-assembly was successful
if patches is False:
merged.append('\n>%s_e:%s\n' % (scaffold, error))
merged.append(origB)
return merged
if len(stop) < k: # error is at the end of the scaffold
extended = patch_end(orig, cov, error, n_error, patches, merged, k, origA, origB, start, stop, buffer)
if extended is not False and merged is not False:
merged.append(extended)
return merged
else:
return merged
# error is in the middle of the scaffold
middle = patch_middle(orig, cov, error, n_error, patches, merged, k, origA, origB, origM, start, stop, buffer)
if middle is False: # was not able to find start or stop sequence in re-assembly
if len(origB) > 0:
merged.append('\n>%s_e:%s\n' % (scaffold, error))
merged.append(origB)
return merged
if middle[0] == 0: # was able to find start and stop sequences in the re-assembly
middle = middle[1]
merged.append(middle)
merged.append(origB)
print('\t\t fixed')
return merged
if middle[0] == 1: # was only able to find the start (not the stop) sequence in the re_assembly
middle = middle[1]
merged.append(middle)
if len(origB) > 0:
merged.append('\n>%s_e:%s\n' % (scaffold, error))
merged.append(origB)
return merged
if middle[0] == 2: # was only able to find the stop (not the start) sequence in the re_assembly
middle = middle[1]
merged.append('\n>%s_e:%s\n' % (scaffold, error))
merged.append(middle)
if len(origB) > 0:
merged.append(origB)
return merged
if middle[0] == 12: # was able to find the start and the stop, but not on the same re-assembled scaffold
mA, mB = middle[1][0], middle[1][1]
combined = check_overlap_seqs(mA, mB, k)
if combined is False:
merged.append(mA)
merged.append('\n>%s_e:%s\n' % (scaffold, error))
merged.append(mB)
if len(origB) > 0:
merged.append(origB)
return merged
else:
merged.append(combined)
print('\t\t fixed')
return merged
def merge_assemblies(assembly, scaffolds, s2c, s2errors, re_assembled, combine_windows, prefix, cov_thresh):
"""
    merge assembly and re-assembly
"""
merged_assembly = open('%s/re_assembled-draft.fa' % (prefix), 'w')
if combine_windows is True:
for s in re_assembled:
for w, i in list(re_assembled[s].items()):
if type(w) is str:
for r in w.split('_'):
re_assembled[s][int(r)] = i
del re_assembled[s][w]
for id, seq in list(scaffolds.items()):
print(id)
merged = []
        if id not in s2errors:
            print('\n'.join([seq[0], seq[2]]), file=merged_assembly)
            continue
        errors = sorted(s2errors[id])
print('\terrors: %s' % (errors))
for i, error in enumerate(errors):
print('\terror: %s' % error)
if (i + 1) >= len(errors):
n_error = False
else:
n_error = errors[i + 1]
merged = patch_contig(seq[2], s2c[id], error, n_error, re_assembled[id][error], merged, id, cov_thresh)
if merged is not False:
merged.insert(0, '>%s\n' % id)
print(''.join(merged), file=merged_assembly)
merged_assembly.close()
return merged_assembly.name
def final_check(assembly, pr_split, threads, cov_thresh, mismatches, collection_mismatches, prefix, multiple):
"""
- check re-assembled contigs for zero coverage regions, split at these points
- re-map to final version
"""
scaffolds, mapping, s2c, s2errors, reads, pr_split = \
check_assembly(assembly, False, threads, cov_thresh, mismatches, collection_mismatches, multiple, prefix, pr_split = pr_split, allow_orphan = False, allow_orphan_ends = True)
final_fasta = open('%s/re_assembled-final.fa' % (prefix), 'w')
for seq in break_by_coverage(scaffolds, s2c, cov_thresh, ignore_ends = False):
print('\n'.join([seq[0][1], seq[1]]), file=final_fasta)
final_fasta.close()
# check final assembly
return check_assembly(final_fasta.name, False, threads, cov_thresh, mismatches, collection_mismatches, multiple, prefix, pr_split = pr_split, allow_orphan = False, allow_orphan_ends = True)
def parse_mapping(mapping, ends = False, scaffolds = False):
"""
create a paired-read dictionary from sam files
    * pairs[read] = [bit, mate, mapping[scaffold] = [map, map2, ...], fastq]
* map = [overlap, mismatches, sam_info]
* sam_info = all sam lines except read + quality
"""
pairs = {}
header = []
for line in open(mapping):
if line.startswith('@'):
header.append(line.strip())
continue
line = line.strip().split()
read, bit, scaffold, start = line[0:4]
bit, start = int(bit), int(start)
r = [start, start + len(line[9]) - 1]
mismatches = map_tool.count_mismatches(line)
fastq = map_tool.sam2fastq(line)
info = [line[0:9], line[11:]]
if '/' in read:
read = read.rsplit('/', 1)[0]
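        # bin(bit)[-7] tests the SAM 0x40 flag (2**6: first read in pair);
        # flags of paired reads always include 0x40 or 0x80, so the binary
        # string is long enough to index from the end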
if bin(bit)[-7] == '1': # first sequence in pair
read = '%s_1' % (read)
mate = '%s_2' % (read.rsplit('_', 1)[0])
else:
read = '%s_2' % (read)
mate = '%s_1' % (read.rsplit('_', 1)[0])
if ends is not False and (r[0] > ends and r[1] < scaffolds[scaffold][1] - ends):
continue
if read not in pairs:
pairs[read] = [bit, mate, {}, fastq]
if scaffold not in pairs[read][2]:
pairs[read][2][scaffold] = []
pairs[read][2][scaffold].append([r, mismatches, info])
return pairs, header
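# illustrative shape of the structure parse_mapping returns (values made up):
#   pairs['readA_1'] = [99, 'readA_2',
#                       {'scaffold_1': [[[1501, 1600], 0, [sam_fields, tags]]]},
#                       fastq_lines]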
def check_assembly(assembly, pr, threads, cov_thresh, mismatches, collection_mismatches, multiple, prefix, window = False, combine_windows = False, pr_split = False, allow_orphan = False, allow_orphan_ends = False):
"""
check assembly for mismatches
"""
# read assembly into memory
scaffolds = {i[0].split('>')[1]: [i[0], len(i[1]), i[1]] for i in parse_fasta(assembly) if i != [[], []]}
# map reads to assembly
mapping, pr_split = map_reads(assembly, scaffolds, pr, threads, multiple, pr_split = pr_split)
    # identify errors (no coverage regions) based on stringent mapping
# * s2errors = [positions with errors]
# * s2c[scaffold] = [per scaffold coverage at each position]
# * filtered_mapping_both = sam file requiring both reads to map within mismatch criteria
pairs, header = parse_mapping(mapping)
s2c, s2errors = id_errors(pairs, header, assembly, scaffolds, cov_thresh, mismatches, allow_orphan = allow_orphan, allow_orphan_ends = allow_orphan_ends)
# collect reads that map to either window or entire contigs
# * filtered_mapping_one = sam file requiring one of paired reads to map within mismatch criteria
# * reads[scaffold][error] = pe
if window is False:
reads = collect_reads(pairs, assembly, scaffolds, collection_mismatches, prefix)
else:
reads = collect_reads(pairs, assembly, scaffolds, collection_mismatches, prefix, s2errors, window, combine_windows)
return scaffolds, mapping, s2c, s2errors, reads, pr_split
def format_assembly(fasta, prefix):
"""
get rid of strange characters in assembly, make new file in curated directory
"""
if '/' in fasta:
fixed = open('%s/%s' % (prefix, fasta.rsplit('/', 1)[1]), 'w')
else:
fixed = open('%s/%s' % (prefix, fasta), 'w')
for seq in fix_fasta(fasta):
print('\n'.join(seq), file=fixed)
fixed.close()
return fixed.name
def curate_assembly(assembly, pr, pr_split, prefix, \
threads, mismatches = 1, collection_mismatches = 2, window = 1000, combine_windows = True, cov_thresh = 1, multiple = True, assembler = 'velvet'):
"""
identify and correct scaffolding errors in assembly
"""
os.system('mkdir -p %s' % (prefix))
assembly = format_assembly(assembly, prefix)
scaffolds, mapping, s2c, s2errors, mapped_reads, pr_split = \
check_assembly(assembly, pr, threads, cov_thresh, mismatches, collection_mismatches, multiple, prefix, window, combine_windows, pr_split = pr_split, allow_orphan = False, allow_orphan_ends = False)
re_assembled = re_assemble_fragments(mapped_reads, prefix, threads, cov_thresh, mismatches, multiple, assembler = assembler)
merged = merge_assemblies(assembly, scaffolds, s2c, s2errors, re_assembled, combine_windows, prefix, cov_thresh)
return final_check(merged, pr_split, threads, cov_thresh, mismatches, collection_mismatches, prefix, multiple)
if __name__ == '__main__':
if len(sys.argv) != 7:
print('specify threads, fasta for assembly, mismatches tolerated, paired reads (fastq), pair 1 (fastq), and pair 2 (fastq)', file=sys.stderr)
exit()
for i, v in enumerate(sys.argv[3:], 3):
if v == 'False' or v == 'FALSE' or v == 'false':
sys.argv[i] = False
threads, assembly, mm, pr, pr1, pr2 = sys.argv[1:]
if pr1 is False or pr2 is False:
pr_split = False
else:
pr_split = [pr1, pr2]
if '/' in assembly:
prefix = '%s.curated' % (assembly.rsplit('.', 1)[0].rsplit('/', 1)[1])
else:
prefix = '%s.curated' % (assembly.rsplit('.', 1)[0])
curate_assembly(assembly, pr, pr_split, prefix, mismatches = int(mm), threads = int(threads))
|
christophertbrown/fix_assembly_errors
|
ctbRA/re_assemble_errors.py
|
Python
|
gpl-2.0
| 38,555
|
[
"Bowtie"
] |
f3584afa1d8c8e3d594a8a517967c5e0dbed879735fceb8e4c2aeaef049b5317
|
########################################################################
# $HeadURL $
# File: StorageElementProxyHandler.py
########################################################################
"""
:mod: StorageElementProxyHandler
.. module: StorageElementProxyHandler
:synopsis: This is a service which represents a DISET proxy to the Storage Element component.
This is used to get and put files from a remote storage.
"""
__RCSID__ = "$Id$"
import os
import shutil
import SocketServer
import threading
import socket
import random
from types import DictType, ListType, StringType, StringTypes
## from DIRAC
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.Core.Utilities.Subprocess import pythonCall
from DIRAC.Core.Utilities.Os import getDiskSpace
from DIRAC.Core.Utilities.DictCache import DictCache
from DIRAC.DataManagementSystem.private.HttpStorageAccessHandler import HttpStorageAccessHandler
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
## globals
BASE_PATH = ""
HTTP_FLAG = False
HTTP_PORT = 9180
HTTP_PATH = ""
def purgeCacheDirectory( path ):
""" del recursively :path: """
shutil.rmtree( path )
gRegister = DictCache(purgeCacheDirectory)
def initializeStorageElementProxyHandler( serviceInfo ):
""" handler initialisation """
global BASE_PATH, HTTP_FLAG, HTTP_PORT, HTTP_PATH
cfgPath = serviceInfo['serviceSectionPath']
BASE_PATH = gConfig.getValue( "%s/BasePath" % cfgPath, BASE_PATH )
if not BASE_PATH:
gLogger.error( 'Failed to get the base path' )
return S_ERROR( 'Failed to get the base path' )
gLogger.info('The base path obtained is %s. Checking its existence...' % BASE_PATH)
if not os.path.exists(BASE_PATH):
gLogger.info('%s did not exist. Creating....' % BASE_PATH)
os.makedirs(BASE_PATH)
HTTP_FLAG = gConfig.getValue( "%s/HttpAccess" % cfgPath, False )
if HTTP_FLAG:
HTTP_PATH = '%s/httpCache' % BASE_PATH
HTTP_PATH = gConfig.getValue( "%s/HttpCache" % cfgPath, HTTP_PATH )
if not os.path.exists( HTTP_PATH ):
gLogger.info('Creating HTTP cache directory %s' % (HTTP_PATH) )
os.makedirs( HTTP_PATH )
HTTP_PORT = gConfig.getValue( "%s/HttpPort" % cfgPath, 9180 )
gLogger.info('Creating HTTP server thread, port:%d, path:%s' % ( HTTP_PORT, HTTP_PATH ) )
httpThread = HttpThread( HTTP_PORT, HTTP_PATH )
return S_OK()
class ThreadedSocketServer( SocketServer.ThreadingMixIn, SocketServer.TCPServer ):
""" bag dummy class to hold ThreadingMixIn and TCPServer """
pass
class HttpThread( threading.Thread ):
"""
.. class:: HttpThread
  Single daemon thread running HttpStorageAccessHandler.
"""
def __init__( self, port, path ):
""" c'tor """
self.port = port
self.path = path
threading.Thread.__init__( self )
self.setDaemon( 1 )
self.start()
def run( self ):
""" thread run """
global gRegister, BASE_PATH
os.chdir( self.path )
handler = HttpStorageAccessHandler
handler.register = gRegister
handler.basePath = self.path
httpd = ThreadedSocketServer( ("", self.port), handler )
httpd.serve_forever()
class StorageElementProxyHandler(RequestHandler):
"""
.. class:: StorageElementProxyHandler
"""
types_callProxyMethod = [ StringType, StringType, ListType, DictType ]
def export_callProxyMethod( self, se, name, args, kargs ):
""" A generic method to call methods of the Storage Element.
"""
res = pythonCall( 200, self.__proxyWrapper, se, name, args, kargs )
if res['OK']:
return res['Value']
return res
def __proxyWrapper( self, se, name, args, kargs ):
""" The wrapper will obtain the client proxy and set it up in the environment.
The required functionality is then executed and returned to the client.
"""
res = self.__prepareSecurityDetails()
if not res['OK']:
return res
storageElement = StorageElement( se )
method = getattr( storageElement, name ) if hasattr( storageElement, name ) else None
if not method:
return S_ERROR( "Method '%s' isn't implemented!" % name )
if not callable( getattr( storageElement, name ) ):
return S_ERROR( "Attribute '%s' isn't a method!" % name )
return method( *args, **kargs )
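  # a client would drive this generic method roughly like this (sketch;
  # assumes the service is registered as DataManagement/StorageElementProxy):
  #   from DIRAC.Core.DISET.RPCClient import RPCClient
  #   proxy = RPCClient( 'DataManagement/StorageElementProxy' )
  #   res = proxy.callProxyMethod( 'SOME-SE', 'getFileSize', [ '/some/lfn' ], {} )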
types_uploadFile = [ StringType, StringType ]
def export_uploadFile( self, se, pfn ):
""" This method uploads a file present in the local cache to the specified storage element
"""
res = pythonCall( 300, self.__uploadFile, se, pfn )
if res['OK']:
return res['Value']
return res
def __uploadFile(self, se, pfn):
""" proxied upload file """
res = self.__prepareSecurityDetails()
if not res['OK']:
return res
# Put file to the SE
try:
storageElement = StorageElement(se)
except AttributeError, x:
errStr = "__uploadFile: Exception while instantiating the Storage Element."
gLogger.exception( errStr, se, str(x) )
return S_ERROR(errStr)
putFileDir = "%s/putFile" % BASE_PATH
localFileName = "%s/%s" % ( putFileDir, os.path.basename(pfn) )
res = returnSingleResult( storageElement.putFile( { pfn : localFileName } ) )
if not res['OK']:
gLogger.error("prepareFile: Failed to put local file to storage.", res['Message'] )
# Clear the local cache
try:
shutil.rmtree(putFileDir)
gLogger.debug("Cleared existing putFile cache")
except Exception, x:
gLogger.exception("Failed to remove destination dir.", putFileDir, x )
return res
types_prepareFile = [ StringType, StringType ]
def export_prepareFile(self, se, pfn):
""" This method simply gets the file to the local storage area
"""
res = pythonCall( 300, self.__prepareFile, se, pfn )
if res['OK']:
return res['Value']
return res
def __prepareFile( self, se, pfn ):
""" proxied prepare file """
res = self.__prepareSecurityDetails()
if not res['OK']:
return res
# Clear the local cache
getFileDir = "%s/getFile" % BASE_PATH
if os.path.exists(getFileDir):
try:
shutil.rmtree(getFileDir)
gLogger.debug("Cleared existing getFile cache")
except Exception, x:
gLogger.exception("Failed to remove destination directory.", getFileDir, x )
os.mkdir(getFileDir)
# Get the file to the cache
try:
storageElement = StorageElement(se)
except AttributeError, x:
errStr = "prepareFile: Exception while instantiating the Storage Element."
gLogger.exception( errStr, se, str(x) )
return S_ERROR(errStr)
res = returnSingleResult( storageElement.getFile( pfn, localPath = "%s/getFile" % BASE_PATH ) )
if not res['OK']:
gLogger.error( "prepareFile: Failed to get local copy of file.", res['Message'] )
return res
return S_OK()
types_prepareFileForHTTP = [ list(StringTypes)+[ListType] ]
def export_prepareFileForHTTP(self, lfn):
""" This method simply gets the file to the local storage area using LFN
"""
# Do clean-up, should be a separate regular thread
gRegister.purgeExpired()
key = str( random.getrandbits( 128 ) )
result = pythonCall( 300, self.__prepareFileForHTTP, lfn, key )
if result['OK']:
result = result['Value']
if result['OK']:
if HTTP_FLAG:
host = socket.getfqdn()
url = 'http://%s:%d/%s' % ( host, HTTP_PORT, key )
gRegister.add( key, 1800, result['CachePath'] )
result['HttpKey'] = key
result['HttpURL'] = url
return result
return result
return result
def __prepareFileForHTTP( self, lfn, key ):
""" proxied preapre file for HTTP """
global HTTP_PATH
res = self.__prepareSecurityDetails()
if not res['OK']:
return res
# Clear the local cache
getFileDir = "%s/%s" % ( HTTP_PATH, key )
os.makedirs(getFileDir)
# Get the file to the cache
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
dataMgr = DataManager()
result = dataMgr.getFile( lfn, destinationDir = getFileDir )
result['CachePath'] = getFileDir
return result
############################################################
#
# This is the method to setup the proxy and configure the environment with the client credential
#
def __prepareSecurityDetails(self):
""" Obtains the connection details for the client
"""
try:
credDict = self.getRemoteCredentials()
clientDN = credDict['DN']
clientUsername = credDict['username']
clientGroup = credDict['group']
gLogger.debug( "Getting proxy for %s@%s (%s)" % ( clientUsername, clientGroup, clientDN ) )
res = gProxyManager.downloadVOMSProxy( clientDN, clientGroup )
if not res['OK']:
return res
chain = res['Value']
proxyBase = "%s/proxies" % BASE_PATH
if not os.path.exists(proxyBase):
os.makedirs(proxyBase)
proxyLocation = "%s/proxies/%s-%s" % ( BASE_PATH, clientUsername, clientGroup )
gLogger.debug("Obtained proxy chain, dumping to %s." % proxyLocation)
res = gProxyManager.dumpProxyToFile( chain, proxyLocation )
if not res['OK']:
return res
gLogger.debug("Updating environment.")
os.environ['X509_USER_PROXY'] = res['Value']
return res
except Exception, error:
exStr = "__getConnectionDetails: Failed to get client connection details."
gLogger.exception( exStr, '', error )
return S_ERROR(exStr)
############################################################
#
# These are the methods that are for actual file transfer
#
def transfer_toClient( self, fileID, token, fileHelper ):
""" Method to send files to clients.
fileID is the local file name in the SE.
token is used for access rights confirmation.
"""
file_path = "%s/%s" % ( BASE_PATH, fileID )
result = fileHelper.getFileDescriptor( file_path, 'r' )
if not result['OK']:
result = fileHelper.sendEOF()
# check if the file does not really exist
if not os.path.exists(file_path):
return S_ERROR('File %s does not exist' % os.path.basename(file_path))
else:
return S_ERROR('Failed to get file descriptor')
fileDescriptor = result['Value']
result = fileHelper.FDToNetwork(fileDescriptor)
if not result['OK']:
return S_ERROR('Failed to get file %s' % fileID )
return result
def transfer_fromClient( self, fileID, token, fileSize, fileHelper ):
""" Method to receive file from clients.
fileID is the local file name in the SE.
fileSize can be Xbytes or -1 if unknown.
token is used for access rights confirmation.
"""
if not self.__checkForDiskSpace( BASE_PATH, fileSize ):
return S_ERROR('Not enough disk space')
file_path = "%s/%s" % ( BASE_PATH, fileID )
if not os.path.exists( os.path.dirname( file_path ) ):
os.makedirs( os.path.dirname( file_path ) )
result = fileHelper.getFileDescriptor( file_path, 'w' )
if not result['OK']:
return S_ERROR('Failed to get file descriptor')
fileDescriptor = result['Value']
result = fileHelper.networkToFD(fileDescriptor)
if not result['OK']:
return S_ERROR('Failed to put file %s' % fileID )
return result
@staticmethod
def __checkForDiskSpace( dpath, size ):
""" Check if the directory dpath can accomodate 'size' volume of data
"""
dsize = (getDiskSpace(dpath)-1)*1024*1024
maxStorageSizeBytes = 1024*1024*1024
return ( min(dsize, maxStorageSizeBytes) > size )
|
sposs/DIRAC
|
DataManagementSystem/Service/StorageElementProxyHandler.py
|
Python
|
gpl-3.0
| 11,822
|
[
"DIRAC"
] |
fc6a98cab0262a395a94c28d344b319d189baecb0c333184b02febb7db22d8b3
|
#!/usr/bin/python
"""Test of sayAll."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
#sequence.append(WaitForDocLoad())
sequence.append(PauseAction(5000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Add"))
sequence.append(utils.AssertPresentationAction(
"1. KP_Add to do a SayAll",
["SPEECH OUTPUT: 'Home'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'News'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Projects'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Art'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Support'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Development'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Community'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'live.gnome.org'",
"SPEECH OUTPUT: 'heading level 1'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'Search'",
"SPEECH OUTPUT: 'Titles'",
"SPEECH OUTPUT: 'push button'",
"SPEECH OUTPUT: 'grayed'",
"SPEECH OUTPUT: 'Text'",
"SPEECH OUTPUT: 'push button'",
"SPEECH OUTPUT: 'grayed'",
"SPEECH OUTPUT: 'Home'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'RecentChanges'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'FindPage'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'HelpContents'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Orca'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'en Español'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Home'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Download/Installation'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Configuration/Use'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Accessible Applications'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Mailing List'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '('",
"SPEECH OUTPUT: 'Archives'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ') |'",
"SPEECH OUTPUT: 'FAQ'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'DocIndex'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Welcome to Orca!'",
"SPEECH OUTPUT: 'heading level 1'",
"SPEECH OUTPUT: 'Orca Logo'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'HOT HOT HOT: Notes on'",
"SPEECH OUTPUT: 'access to Firefox 3.0'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Contents'",
"SPEECH OUTPUT: '1.'",
"SPEECH OUTPUT: 'Welcome to Orca!'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '2.'",
"SPEECH OUTPUT: 'About'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '3.'",
"SPEECH OUTPUT: 'Audio Guides'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '4.'",
"SPEECH OUTPUT: 'Download/Installation'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '5.'",
"SPEECH OUTPUT: 'Configuration/Use'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '6.'",
"SPEECH OUTPUT: 'Accessible Applications'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '7.'",
"SPEECH OUTPUT: 'How Can I Help?'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '8.'",
"SPEECH OUTPUT: 'More Information'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'About'",
"SPEECH OUTPUT: 'heading level 1'",
"SPEECH OUTPUT: 'Orca is a free, open source, flexible, extensible, and powerful assistive technology for people with visual impairments.'",
"SPEECH OUTPUT: 'Using various combinations of speech synthesis, braille, and magnification, Orca helps provide access to applications and toolkits that support the AT-SPI \\(e.g.,'",
"SPEECH OUTPUT: 'the GNOME desktop\\).'",
"SPEECH OUTPUT: 'The development of Orca has been led by the'",
"SPEECH OUTPUT: 'Accessibility Program Office of Sun Microsystems, Inc.'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'with'",
"SPEECH OUTPUT: 'contributions from many community members'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '.'",
"SPEECH OUTPUT: 'The complete list of work to do, including bugs and feature requests, along with known problems in other components, is maintained in'",
"SPEECH OUTPUT: 'Bugzilla'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '(please see our'",
"SPEECH OUTPUT: 'notes on how we use Bugzilla'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ').'",
"SPEECH OUTPUT: 'Please join and participate on the'",
"SPEECH OUTPUT: 'Orca mailing list'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '('",
"SPEECH OUTPUT: 'archives'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '): it's a helpful, kind, and productive environment composed of users and developers.'",
"SPEECH OUTPUT: 'Audio Guides'",
"SPEECH OUTPUT: 'heading level 1'",
"SPEECH OUTPUT: 'Darragh \xd3 H\xe9iligh'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'has created several audio guides for Orca.'",
"SPEECH OUTPUT: 'This is a fantastic contribution (THANKS!)!!!'",
"SPEECH OUTPUT: 'The audio guides can be found at'",
"SPEECH OUTPUT: 'http://www.digitaldarragh.com/linuxat.asp'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'and include the following:'",
"SPEECH OUTPUT: '•'",
"SPEECH OUTPUT: 'Walk through of the installation of Ubuntu 7.4. Very helpful tutorial'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '•'",
"SPEECH OUTPUT: 'Review of Fedora 7 and the Orca screen reader for the Gnome graphical desktop'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '•'",
"SPEECH OUTPUT: 'Guide to installing the latest versions of Firefox and Orca'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Download/Installation'",
"SPEECH OUTPUT: 'heading level 1'",
"SPEECH OUTPUT: 'As of GNOME 2.16,'",
"SPEECH OUTPUT: 'Orca is a part of the GNOME platform.'",
"SPEECH OUTPUT: 'As a result, Orca is already provided by default on a number of operating system distributions, including'",
"SPEECH OUTPUT: 'Open Solaris'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'and'",
"SPEECH OUTPUT: 'Ubuntu'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '.'",
"SPEECH OUTPUT: 'Please also refer to the'",
"SPEECH OUTPUT: 'Download/Installation page'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'for detailed information on various distributions as well as installing Orca directly from source.'",
"SPEECH OUTPUT: 'Configuration/Use'",
"SPEECH OUTPUT: 'heading level 1'",
"SPEECH OUTPUT: 'The command to run orca is orca.'",
"SPEECH OUTPUT: 'You can enter this command by pressing Alt+F2 when logged in, waiting for a second or so, then typing orca and pressing return.'",
"SPEECH OUTPUT: 'Orca is designed to present information as you navigate the desktop using the'",
"SPEECH OUTPUT: 'built-in navigation mechanisms of GNOME'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '.'",
"SPEECH OUTPUT: 'These navigation mechanisms are consistent across most desktop applications.'",
"SPEECH OUTPUT: 'You may sometimes wish to control Orca itself, such as bringing up the'",
"SPEECH OUTPUT: 'Orca Configuration GUI'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '(accessed by pressing Insert+Space when Orca is running) and for using flat review mode to examine a window.'",
"SPEECH OUTPUT: 'Refer to'",
"SPEECH OUTPUT: 'Orca Keyboard Commands'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '(Laptop Layout)'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'for more information on Orca-specific keyboard commands.'",
"SPEECH OUTPUT: 'The'",
"SPEECH OUTPUT: 'Orca Configuration GUI'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'also includes a \"Key Bindings\" tab that allows you to get a complete list of Orca key bindings.'",
"SPEECH OUTPUT: 'Please also refer to the'",
"SPEECH OUTPUT: 'Configuration/Use page'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'for detailed information.'",
"SPEECH OUTPUT: 'Accessible Applications'",
"SPEECH OUTPUT: 'heading level 1'",
"SPEECH OUTPUT: 'Orca is designed to work with applications and toolkits that support the assistive technology service provider interface (AT-SPI).'",
"SPEECH OUTPUT: 'This includes the GNOME desktop and its applications,'",
"SPEECH OUTPUT: 'OpenOffice'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ', Firefox, and the Java platform.'",
"SPEECH OUTPUT: 'Some applications work better than others, however, and the Orca community continually works to provide compelling access to more and more applications.'",
"SPEECH OUTPUT: 'On the'",
"SPEECH OUTPUT: 'Accessible Applications page'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ', you will find a growing list of information regarding various applications that can be accessed with Orca as well as tips and tricks for using them.'",
"SPEECH OUTPUT: 'The list is not to be a conclusive list of all applications.'",
"SPEECH OUTPUT: 'Rather, the goal is to provide a repository within which users can share experiences regarding applications they have tested.'",
"SPEECH OUTPUT: 'See also the'",
"SPEECH OUTPUT: 'Application Specific Settings'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'page for how to configure settings specific to an application.'",
"SPEECH OUTPUT: 'Please also refer to the'",
"SPEECH OUTPUT: 'Accessible Applications page'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'for detailed information.'",
"SPEECH OUTPUT: 'How Can I Help?'",
"SPEECH OUTPUT: 'heading level 1'",
"SPEECH OUTPUT: 'There's a bunch you can do!'",
"SPEECH OUTPUT: 'Please refer to the'",
"SPEECH OUTPUT: 'How Can I Help page'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'for detailed information.'",
"SPEECH OUTPUT: 'More Information'",
"SPEECH OUTPUT: 'heading level 1'",
"SPEECH OUTPUT: '•'",
"SPEECH OUTPUT: 'Frequently Asked Questions:'",
"SPEECH OUTPUT: 'FAQ'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '•'",
"SPEECH OUTPUT: 'Mailing list:'",
"SPEECH OUTPUT: 'orca-list@gnome.org'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '('",
"SPEECH OUTPUT: 'Archives'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ')'",
"SPEECH OUTPUT: '•'",
"SPEECH OUTPUT: 'Bug database:'",
"SPEECH OUTPUT: 'GNOME Bug Tracking System (Bugzilla)'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '('",
"SPEECH OUTPUT: 'current bug list'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ')'",
"SPEECH OUTPUT: '•'",
"SPEECH OUTPUT: 'Design documents:'",
"SPEECH OUTPUT: 'Orca Documentation Series'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '•'",
"SPEECH OUTPUT: 'Dive Into Python, Mark Pilgrim'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '•'",
"SPEECH OUTPUT: 'Python in a Nutshell, Alex Martelli'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '•'",
"SPEECH OUTPUT: 'Python Pocket Reference, Mark Lutz'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'separator'",
"SPEECH OUTPUT: 'The information on this page and the other Orca-related pages on this site are distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.'",
"SPEECH OUTPUT: 'separator'",
"SPEECH OUTPUT: 'CategoryAccessibility'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Orca (last edited 2007-12-07 22:09:22 by'",
"SPEECH OUTPUT: 'WillieWalker'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ')'",
"SPEECH OUTPUT: 'User'",
"SPEECH OUTPUT: 'heading level 3'",
"SPEECH OUTPUT: 'Login'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Page'",
"SPEECH OUTPUT: 'heading level 3'",
"SPEECH OUTPUT: 'Immutable Page'",
"SPEECH OUTPUT: 'Info'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Attachments'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'More Actions:'",
"SPEECH OUTPUT: 'combo box'",
"SPEECH OUTPUT: 'GNOME World Wide'",
"SPEECH OUTPUT: 'heading level 3'",
"SPEECH OUTPUT: 'GnomeWorldWide'",
"SPEECH OUTPUT: 'image'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Copyright \xa9 2005, 2006, 2007'",
"SPEECH OUTPUT: 'The GNOME Project'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '.'",
"SPEECH OUTPUT: 'Hosted by'",
"SPEECH OUTPUT: 'Red Hat'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '.'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
GNOME/orca
|
test/keystrokes/firefox/say_all_wiki_no_context.py
|
Python
|
lgpl-2.1
| 12,887
|
[
"ORCA"
] |
5b4c63a9c02627fb7c325cf0f962a75282d80a50eabe53e8d009fd689104512c
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import logging
import unittest
from telemetry.core import util
from telemetry.core.platform.profiler import perf_profiler
from telemetry.unittest import options_for_unittests
from telemetry.unittest import simple_mock
class TestPerfProfiler(unittest.TestCase):
def testPerfProfiler(self):
options = options_for_unittests.GetCopy()
if not perf_profiler.PerfProfiler.is_supported(options.browser_type):
logging.warning('PerfProfiler is not supported. Skipping test')
return
profile_file = os.path.join(
util.GetUnittestDataDir(), 'perf_report_output.txt')
    with open(profile_file, 'r') as f:
      perf_report_output = f.read()
mock_popen = simple_mock.MockObject()
mock_popen.ExpectCall('communicate').WillReturn([perf_report_output])
mock_subprocess = simple_mock.MockObject()
mock_subprocess.ExpectCall(
'Popen').WithArgs(simple_mock.DONT_CARE).WillReturn(mock_popen)
setattr(mock_subprocess, 'PIPE', simple_mock.MockObject())
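    # Swap the module-level subprocess reference for the mock so that
    # GetTopSamples() parses the canned perf report instead of invoking
    # a real `perf report` child process; restored in the finally block.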
real_subprocess = perf_profiler.subprocess
perf_profiler.subprocess = mock_subprocess
try:
self.assertEqual(
perf_profiler.PerfProfiler.GetTopSamples(profile_file, 10),
{ 'v8::internal::StaticMarkingVisitor::MarkMapContents': 63615201,
'v8::internal::RelocIterator::next': 38271931,
'v8::internal::LAllocator::MeetConstraintsBetween': 42913933,
'v8::internal::FlexibleBodyVisitor::Visit': 31909537,
'v8::internal::LiveRange::CreateAssignedOperand': 42913933,
'void v8::internal::RelocInfo::Visit': 96878864,
'WebCore::HTMLTokenizer::nextToken': 48240439,
'v8::internal::Scanner::ScanIdentifierOrKeyword': 46054550,
'sk_memset32_SSE2': 45121317,
'v8::internal::HeapObject::Size': 39786862
})
finally:
perf_profiler.subprocess = real_subprocess
|
androidarmv6/android_external_chromium_org
|
tools/telemetry/telemetry/core/platform/profiler/perf_profiler_unittest.py
|
Python
|
bsd-3-clause
| 2,063
|
[
"VisIt"
] |
3f045be1319ac3b3330f0dbafda3dcc4059a81ac93a49078dfdca6a50dd15486
|
""" Hello Service is an example of how to build services in the DIRAC framework
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import six
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler
class HelloHandler(RequestHandler):
@classmethod
def initializeHandler(cls, serviceInfo):
""" Handler initialization
"""
cls.defaultWhom = "World"
return S_OK()
def initialize(self):
""" Response initialization
"""
self.requestDefaultWhom = self.srv_getCSOption("DefaultWhom", HelloHandler.defaultWhom)
auth_sayHello = ['all']
types_sayHello = [six.string_types]
def export_sayHello(self, whom):
""" Say hello to somebody
"""
gLogger.notice("Called sayHello of HelloHandler with whom = %s" % whom)
if not whom:
whom = self.requestDefaultWhom
if whom.lower() == 'nobody':
return S_ERROR("Not greeting anybody!")
return S_OK("Hello " + whom)
|
yujikato/DIRAC
|
docs/source/DeveloperGuide/AddingNewComponents/DevelopingServices/HelloHandler.py
|
Python
|
gpl-3.0
| 1,047
|
[
"DIRAC"
] |
8172ea2a25b1507ccabd188cb26240887ccd81cf8c81a18e279592bf629117d7
|
''' AccountingCacheCommand
The AccountingCacheCommand class is a command module that collects command
classes to store accounting results in the accounting cache.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# FIXME: NOT Usable ATM
# missing doNew, doCache, doMaster
__RCSID__ = '$Id$'
from datetime import datetime, timedelta
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.JEncode import strToIntDict
from DIRAC.AccountingSystem.Client.ReportsClient import ReportsClient
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getSites, getCESiteMapping
from DIRAC.ResourceStatusSystem.Command.Command import Command
class SuccessfullJobsBySiteSplittedCommand(Command):
def __init__(self, args=None, clients=None):
super(SuccessfullJobsBySiteSplittedCommand, self).__init__(args, clients)
if 'ReportsClient' in self.apis:
self.rClient = self.apis['ReportsClient']
else:
self.rClient = ReportsClient()
def doCommand(self):
"""
        Returns successful jobs using the DIRAC accounting system for every site
        for the last self.args['hours'] hours
:params:
:attr:`sites`: list of sites (when not given, take every site)
:returns:
"""
if 'hours' not in self.args:
return S_ERROR('Number of hours not specified')
hours = self.args['hours']
sites = None
if 'sites' in self.args:
sites = self.args['sites']
if sites is None:
sites = getSites()
if not sites['OK']:
return sites
sites = sites['Value']
if not sites:
return S_ERROR('Sites is empty')
fromD = datetime.utcnow() - timedelta(hours=hours)
toD = datetime.utcnow()
successfulJobs = self.rClient.getReport('Job', 'NumberOfJobs', fromD, toD,
{'FinalStatus': ['Done'],
'Site': sites}, 'Site')
if not successfulJobs['OK']:
return successfulJobs
successfulJobs = successfulJobs['Value']
if 'data' not in successfulJobs:
return S_ERROR('Missing data key')
if 'granularity' not in successfulJobs:
return S_ERROR('Missing granularity key')
singlePlots = {}
successfulJobs['data'] = {site: strToIntDict(value) for site, value in successfulJobs['data'].items()}
for site, value in successfulJobs['data'].items():
if site in sites:
plot = {}
plot['data'] = {site: value}
plot['granularity'] = successfulJobs['granularity']
singlePlots[site] = plot
return S_OK(singlePlots)
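# illustrative shape of the S_OK payload built above (numbers made up):
#   {'LCG.CERN.ch': {'data': {'LCG.CERN.ch': {1325376000: 42, 1325379600: 37}},
#                    'granularity': 3600}}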
################################################################################
################################################################################
class FailedJobsBySiteSplittedCommand(Command):
def __init__(self, args=None, clients=None):
super(FailedJobsBySiteSplittedCommand, self).__init__(args, clients)
if 'ReportsClient' in self.apis:
self.rClient = self.apis['ReportsClient']
else:
self.rClient = ReportsClient()
def doCommand(self):
"""
        Returns failed jobs using the DIRAC accounting system for every site
        for the last self.args['hours'] hours
:params:
:attr:`sites`: list of sites (when not given, take every site)
:returns:
"""
if 'hours' not in self.args:
return S_ERROR('Number of hours not specified')
hours = self.args['hours']
sites = None
if 'sites' in self.args:
sites = self.args['sites']
if sites is None:
# FIXME: pointing to the CSHelper instead
# sources = self.rsClient.getSite( meta = {'columns': 'SiteName'} )
# if not sources[ 'OK' ]:
# return sources
# sources = [ si[0] for si in sources[ 'Value' ] ]
sites = getSites()
if not sites['OK']:
return sites
sites = sites['Value']
if not sites:
return S_ERROR('Sites is empty')
fromD = datetime.utcnow() - timedelta(hours=hours)
toD = datetime.utcnow()
failedJobs = self.rClient.getReport('Job', 'NumberOfJobs', fromD, toD,
{'FinalStatus': ['Failed'],
'Site': sites},
'Site')
if not failedJobs['OK']:
return failedJobs
failedJobs = failedJobs['Value']
if 'data' not in failedJobs:
return S_ERROR('Missing data key')
if 'granularity' not in failedJobs:
return S_ERROR('Missing granularity key')
failedJobs['data'] = {site: strToIntDict(value) for site, value in failedJobs['data'].items()}
singlePlots = {}
for site, value in failedJobs['data'].items():
if site in sites:
plot = {}
plot['data'] = {site: value}
plot['granularity'] = failedJobs['granularity']
singlePlots[site] = plot
return S_OK(singlePlots)
################################################################################
################################################################################
class SuccessfullPilotsBySiteSplittedCommand(Command):
def __init__(self, args=None, clients=None):
super(SuccessfullPilotsBySiteSplittedCommand, self).__init__(args, clients)
if 'ReportsClient' in self.apis:
self.rClient = self.apis['ReportsClient']
else:
self.rClient = ReportsClient()
def doCommand(self):
"""
        Returns successful pilots using the DIRAC accounting system for every site
        for the last self.args['hours'] hours
:params:
:attr:`sites`: list of sites (when not given, take every site)
:returns:
"""
if 'hours' not in self.args:
return S_ERROR('Number of hours not specified')
hours = self.args['hours']
sites = None
if 'sites' in self.args:
sites = self.args['sites']
if sites is None:
# FIXME: pointing to the CSHelper instead
# sources = self.rsClient.getSite( meta = {'columns': 'SiteName'} )
# if not sources[ 'OK' ]:
# return sources
# sources = [ si[0] for si in sources[ 'Value' ] ]
sites = getSites()
if not sites['OK']:
return sites
sites = sites['Value']
if not sites:
return S_ERROR('Sites is empty')
fromD = datetime.utcnow() - timedelta(hours=hours)
toD = datetime.utcnow()
succesfulPilots = self.rClient.getReport('Pilot', 'NumberOfPilots', fromD, toD,
{'GridStatus': ['Done'],
'Site': sites},
'Site')
if not succesfulPilots['OK']:
return succesfulPilots
succesfulPilots = succesfulPilots['Value']
if 'data' not in succesfulPilots:
return S_ERROR('Missing data key')
if 'granularity' not in succesfulPilots:
return S_ERROR('Missing granularity key')
succesfulPilots['data'] = {site: strToIntDict(value) for site, value in succesfulPilots['data'].items()}
singlePlots = {}
for site, value in succesfulPilots['data'].items():
if site in sites:
plot = {}
plot['data'] = {site: value}
plot['granularity'] = succesfulPilots['granularity']
singlePlots[site] = plot
return S_OK(singlePlots)
################################################################################
################################################################################
class FailedPilotsBySiteSplittedCommand(Command):
def __init__(self, args=None, clients=None):
super(FailedPilotsBySiteSplittedCommand, self).__init__(args, clients)
if 'ReportsClient' in self.apis:
self.rClient = self.apis['ReportsClient']
else:
self.rClient = ReportsClient()
def doCommand(self):
"""
        Returns failed pilots using the DIRAC accounting system for every site
        for the last self.args['hours'] hours
:params:
:attr:`sites`: list of sites (when not given, take every site)
:returns:
"""
if 'hours' not in self.args:
return S_ERROR('Number of hours not specified')
hours = self.args['hours']
sites = None
if 'sites' in self.args:
sites = self.args['sites']
if sites is None:
# FIXME: pointing to the CSHelper instead
# sources = self.rsClient.getSite( meta = {'columns': 'SiteName'} )
# if not sources[ 'OK' ]:
# return sources
# sources = [ si[0] for si in sources[ 'Value' ] ]
sites = getSites()
if not sites['OK']:
return sites
sites = sites['Value']
if not sites:
return S_ERROR('Sites is empty')
fromD = datetime.utcnow() - timedelta(hours=hours)
toD = datetime.utcnow()
failedPilots = self.rClient.getReport('Pilot', 'NumberOfPilots', fromD, toD,
{'GridStatus': ['Aborted'],
'Site': sites},
'Site')
if not failedPilots['OK']:
return failedPilots
failedPilots = failedPilots['Value']
if 'data' not in failedPilots:
return S_ERROR('Missing data key')
if 'granularity' not in failedPilots:
return S_ERROR('Missing granularity key')
        failedPilots['data'] = {site: strToIntDict(value) for site, value in failedPilots['data'].items()}
singlePlots = {}
for site, value in failedPilots['data'].items():
if site in sites:
plot = {}
plot['data'] = {site: value}
plot['granularity'] = failedPilots['granularity']
singlePlots[site] = plot
return S_OK(singlePlots)
################################################################################
################################################################################
class SuccessfullPilotsByCESplittedCommand(Command):
def __init__(self, args=None, clients=None):
super(SuccessfullPilotsByCESplittedCommand, self).__init__(args, clients)
if 'ReportsClient' in self.apis:
self.rClient = self.apis['ReportsClient']
else:
self.rClient = ReportsClient()
def doCommand(self):
"""
        Returns successful pilots using the DIRAC accounting system for every CE
        for the last self.args['hours'] hours
:params:
:attr:`CEs`: list of CEs (when not given, take every CE)
:returns:
"""
if 'hours' not in self.args:
return S_ERROR('Number of hours not specified')
hours = self.args['hours']
ces = None
if 'ces' in self.args:
ces = self.args['ces']
if ces is None:
res = getCESiteMapping()
if not res['OK']:
return res
ces = list(res['Value'])
if not ces:
return S_ERROR('CEs is empty')
fromD = datetime.utcnow() - timedelta(hours=hours)
toD = datetime.utcnow()
successfulPilots = self.rClient.getReport('Pilot', 'NumberOfPilots', fromD, toD,
{'GridStatus': ['Done'],
'GridCE': ces},
'GridCE')
if not successfulPilots['OK']:
return successfulPilots
successfulPilots = successfulPilots['Value']
if 'data' not in successfulPilots:
return S_ERROR('Missing data key')
if 'granularity' not in successfulPilots:
return S_ERROR('Missing granularity key')
successfulPilots['data'] = {site: strToIntDict(value) for site, value in successfulPilots['data'].items()}
singlePlots = {}
for ce, value in successfulPilots['data'].items():
if ce in ces:
plot = {}
plot['data'] = {ce: value}
plot['granularity'] = successfulPilots['granularity']
singlePlots[ce] = plot
return S_OK(singlePlots)
################################################################################
################################################################################
class FailedPilotsByCESplittedCommand(Command):
def __init__(self, args=None, clients=None):
super(FailedPilotsByCESplittedCommand, self).__init__(args, clients)
if 'ReportsClient' in self.apis:
self.rClient = self.apis['ReportsClient']
else:
self.rClient = ReportsClient()
if 'ReportGenerator' in self.apis:
self.rgClient = self.apis['ReportGenerator']
else:
self.rgClient = RPCClient('Accounting/ReportGenerator')
self.rClient.rpcClient = self.rgClient
def doCommand(self):
"""
        Returns failed pilots using the DIRAC accounting system for every CE
        for the last self.args['hours'] hours
:params:
:attr:`CEs`: list of CEs (when not given, take every CE)
:returns:
"""
if 'hours' not in self.args:
return S_ERROR('Number of hours not specified')
hours = self.args['hours']
ces = None
if 'ces' in self.args:
ces = self.args['ces']
if ces is None:
res = getCESiteMapping()
if not res['OK']:
return res
ces = list(res['Value'])
if not ces:
return S_ERROR('CEs is empty')
fromD = datetime.utcnow() - timedelta(hours=hours)
toD = datetime.utcnow()
failedPilots = self.rClient.getReport('Pilot', 'NumberOfPilots', fromD, toD,
{'GridStatus': ['Aborted'],
'GridCE': ces},
'GridCE')
if not failedPilots['OK']:
return failedPilots
failedPilots = failedPilots['Value']
if 'data' not in failedPilots:
return S_ERROR('Missing data key')
if 'granularity' not in failedPilots:
return S_ERROR('Missing granularity key')
failedPilots['data'] = {site: strToIntDict(value) for site, value in failedPilots['data'].items()}
singlePlots = {}
for ce, value in failedPilots['data'].items():
if ce in ces:
plot = {}
plot['data'] = {ce: value}
plot['granularity'] = failedPilots['granularity']
singlePlots[ce] = plot
return S_OK(singlePlots)
################################################################################
################################################################################
class RunningJobsBySiteSplittedCommand(Command):
def __init__(self, args=None, clients=None):
super(RunningJobsBySiteSplittedCommand, self).__init__(args, clients)
if 'ReportsClient' in self.apis:
self.rClient = self.apis['ReportsClient']
else:
self.rClient = ReportsClient()
if 'ReportGenerator' in self.apis:
self.rgClient = self.apis['ReportGenerator']
else:
self.rgClient = RPCClient('Accounting/ReportGenerator')
self.rClient.rpcClient = self.rgClient
def doCommand(self):
"""
        Returns running and previously run jobs, querying the WMSHistory,
        for the last self.args['hours'] hours
:params:
          :attr:`sites`: list of sites (when not given, take every site)
:returns:
"""
if 'hours' not in self.args:
return S_ERROR('Number of hours not specified')
hours = self.args['hours']
sites = None
if 'sites' in self.args:
sites = self.args['sites']
if sites is None:
# FIXME: pointing to the CSHelper instead
# sources = self.rsClient.getSite( meta = {'columns': 'SiteName'} )
# if not sources[ 'OK' ]:
# return sources
# sources = [ si[0] for si in sources[ 'Value' ] ]
sites = getSites()
if not sites['OK']:
return sites
sites = sites['Value']
if not sites:
return S_ERROR('Sites is empty')
fromD = datetime.utcnow() - timedelta(hours=hours)
toD = datetime.utcnow()
runJobs = self.rClient.getReport('WMSHistory', 'NumberOfJobs', fromD, toD,
{}, 'Site')
if not runJobs['OK']:
return runJobs
runJobs = runJobs['Value']
if 'data' not in runJobs:
return S_ERROR('Missing data key')
if 'granularity' not in runJobs:
return S_ERROR('Missing granularity key')
runJobs['data'] = {site: strToIntDict(value) for site, value in runJobs['data'].items()}
singlePlots = {}
for site, value in runJobs['data'].items():
if site in sites:
plot = {}
plot['data'] = {site: value}
plot['granularity'] = runJobs['granularity']
singlePlots[site] = plot
return S_OK(singlePlots)
|
yujikato/DIRAC
|
src/DIRAC/ResourceStatusSystem/Command/AccountingCacheCommand.py
|
Python
|
gpl-3.0
| 16,493
|
[
"DIRAC"
] |
9b2f4ac8cb95462a4809dfbec7450699da2ed6bf7bb24d6cdffa2d4026da080a
|
#!/usr/bin/env python
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import json
import os
import traceback
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
print(req.get("result").get("action"))
if req.get("result").get("action") =="chatbotFoodService":
print("Inside chatbotFoodService")
res = processRequest(req)
elif req.get("result").get("action") =="myloyalityService":
print("Inside myloyalityService")
res = processLoyaltyRequest(req)
elif req.get("result").get("action") =="mydrinksService":
print("Inside myloyalityService")
res = processDrinksRequest(req)
else:
print("Inside promo")
res= processPromotionRequest(req)
res = json.dumps(res, indent=4)
print("After json dumps" + res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
@app.route('/getRegisteredUsers', methods=['GET'])
def getRegisteredUsers():
baseurl = "http://ec2-54-183-130-113.us-west-1.compute.amazonaws.com:8084/getRegisteredUsers"
result = urlopen(baseurl).read()
print("Executed Rest Call"+result);
r = make_response(result)
r.headers['Content-Type'] = 'application/json'
return r
@app.route('/getSailorUsers', methods=['GET'])
def getSailorUsers():
baseurl = "http://ec2-54-183-130-113.us-west-1.compute.amazonaws.com:8084/api/sailorProfile/view"
result = urlopen(baseurl).read()
print("Executed Rest Call"+result);
r = make_response(result)
r.headers['Content-Type'] = 'application/json'
return r
@app.route('/getHelloMessage', methods=['GET'])
def getHelloMessage():
result = "Hello I am from TIBCO"
r = make_response(result)
r.headers['Content-Type'] = 'application/text'
return r
def processDrinksRequest(req):
print(req.get("result").get("action"))
if req.get("result").get("action") != "mydrinksService":
return {}
baseurl = "http://ec2-54-183-130-113.us-west-1.compute.amazonaws.com:8080/alexa/products/beverages/tap?beverageType=Tap"
print(baseurl)
result = urlopen(baseurl).read()
print("drinks recieved")
res = makeDrinkWebhookResult(result)
return res
def processPromotionRequest(req):
print(req.get("result").get("action"))
if req.get("result").get("action") != "promotionService":
return {}
baseurl = "http://ec2-54-183-130-113.us-west-1.compute.amazonaws.com:8080/alexa/getStorePromotions?storeId=4829CA"
print(baseurl)
result = urlopen(baseurl).read()
print("promotion recieved")
res = makePromoWebhookResult(result)
return res
def processRequest(req):
print(req.get("result").get("action"))
if req.get("result").get("action") != "chatbotFoodService":
return {}
print(req.get("result").get("action"))
baseurl = "http://alexav2.cloudhub.io/alexa/products?"
print(req.get("result").get("action"))
bww_query = makebwwQuery(req)
print("bww_query :- "+ bww_query)
if bww_query is None:
return {}
bww_url = baseurl + bww_query
print(bww_url)
result = urlopen(bww_url).read()
res = makeWebhookResult(result)
return res
def processLoyaltyRequest(req):
print(req.get("result").get("action"))
if req.get("result").get("action") != "myloyalityService":
return {}
result = req.get("result")
parameters = result.get("parameters")
baseurl = "http://ec2-54-183-130-113.us-west-1.compute.amazonaws.com:8080/alexa/mcdonalds/memberPoints?memberID="+parameters.get("memberid")
print("baseURL :- " + baseurl)
result = urlopen(baseurl).read()
res = makeLoyalWebhookResult(result)
return res
def makebwwQuery(req):
result = req.get("result")
parameters = result.get("parameters")
product = parameters.get("product")
quantity= parameters.get("quantity")
productType= parameters.get("type")
return "productName="+product+"&quantityName="+quantity+"&productType="+productType
def makeWebhookResult(result):
print("Inside makeWebhookResult")
    resp = None
    speech = ""
    try:
        resp = json.loads(result)
        # print("resp is " + resp)
        listPrice = resp[0]['listPrice']
        print("listPrice is " + listPrice)
        speech = "your amount for current order is " + listPrice
    except Exception as e:
        print(e)
    if resp is None:
        return {}
print("Response:")
print(speech)
return {
"speech": speech,
"displayText": speech,
# "data": data,
# "contextOut": [],
"source": "apiai-chatFoodOrder-webhook-master"
}
def makePromoWebhookResult(result):
print("Inside makePromoWebhookResult")
    speech = ""
    try:
        resp = json.loads(result)
        speech = "Here are our current promotions. Our first promotion is " + resp[0]['promoName'] + ". " + resp[0]['promoDescr'] + "\n Our second promotion is " + resp[1]['promoName'] + ". " + resp[1]['promoDescr'] + ". \n Please visit your nearest restaurant for more information."
    except Exception as e:
        print(e)
    print("Speech is " + speech)
return {
"speech": speech,
"displayText": speech,
# "data": data,
# "contextOut": [],
"source": "apiai-chatFoodOrder-webhook-master"
}
def makeLoyalWebhookResult(result):
print("Inside makeLoyalWebhookResult")
    speech = ""
    try:
        resp = json.loads(result)
        speech = "You currently have " + resp[0]['loyaltyPoints'] + " loyalty points. \n Thanks for being a loyal member of our family. To redeem points, please visit any participating restaurant."
    except Exception as e:
        print(e)
    print("Speech is " + speech)
return {
"speech": speech,
"displayText": speech,
# "data": data,
# "contextOut": [],
"source": "apiai-chatFoodOrder-webhook-master"
}
def makeDrinkWebhookResult(result):
print("Inside makeDrinkWebhookResult")
    resp = []
    speech = ""
    try:
        resp = json.loads(result)
    except Exception as e:
        print(e)
    for item in resp:
        if item['beverageDesc'] is None:
            continue
        speech = speech + item['beverageName'] + " " + item['beverageDesc'] + " \n"
    print("Speech is " + speech)
return {
"speech": speech,
"displayText": speech,
# "data": data,
# "contextOut": [],
"source": "apiai-chatFoodOrder-webhook-master"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print("Starting app on port %d" % port)
app.run(debug=False, port=port, host='0.0.0.0')
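# --- Illustrative sketch (not part of the original file) ---
# A hedged sketch of the webhook route that would dispatch to the process*
# handlers above. The route path, the dispatch loop, and the drink handler's
# name are assumptions: the real route definition and the Flask imports sit
# earlier in app.py, outside this excerpt.
#
# @app.route('/webhook', methods=['POST'])
# def webhook():
#     req = request.get_json(silent=True, force=True)
#     # each processor returns {} unless its action matches
#     for handler in (processRequest, processDrinkRequest,  # hypothetical name
#                     processPromotionRequest, processLoyaltyRequest):
#         res = handler(req)
#         if res:
#             return jsonify(res)
#     return jsonify({})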
|
ypalkar/apiai-bww-webhook-sample
|
app.py
|
Python
|
apache-2.0
| 6,975
|
[
"VisIt"
] |
8f2745f21f693656919cc76041138c280a9dadec3ae3889842ef5215b878e98a
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# rpcserver - minimal rpc benchmark server
# Copyright (C) 2003-2011 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Minimal RPC benchmark server"""
import sys
import getopt
def default_configuration():
"""Return dictionary with default configuration values"""
# Use empty address to listen on all interfaces
conf = {'address': "", 'port': 8001, 'transport': 'xmlrpc'}
return conf
def usage():
"""Usage help"""
print("Usage: %s" % sys.argv[0])
print("Run RPC benchmark server")
print("Options and default values:")
for (key, val) in default_configuration().items():
print("--%s: %s" % (key, val))
def true():
"""Minimal dummy function"""
return True
def main(conf):
"""Run minimal benchmark server"""
if conf["transport"] == "xmlrpc":
from SimpleXMLRPCServer import SimpleXMLRPCServer, \
SimpleXMLRPCRequestHandler
handler = SimpleXMLRPCRequestHandler
# Force keep-alive support - please note that pypy clients may need to
# force garbage collection to actually close connection
handler.protocol_version = 'HTTP/1.1'
server = SimpleXMLRPCServer((conf['address'], conf['port']),
requestHandler=handler)
# Do not log requests
server.logRequests = 0
print("Listening on '%(address)s:%(port)d..." % conf)
server.register_function(true, 'x')
server.serve_forever()
elif conf["transport"] in ["pyro", "pyrossl"]:
import Pyro.core
import Pyro.protocol
class AllWrap(Pyro.core.ObjBase):
"""Pyro needs an object to expose functions/methods"""
def x(self):
"""Minimal dummy method"""
return True
if conf["transport"] == "pyrossl":
# requires m2crypto module, concatenated ssl key/cert and cacert
proto = 'PYROSSL'
Pyro.config.PYROSSL_CERTDIR = '.'
Pyro.config.PYROSSL_CA_CERT = 'cacert.pem'
Pyro.config.PYROSSL_SERVER_CERT = "combined.pem"
Pyro.config.PYROSSL_CLIENT_CERT = "combined.pem"
Pyro.config.PYRO_DNS_URI = True
else:
proto = 'PYRO'
Pyro.core.initServer(banner=0)
server = Pyro.core.Daemon(prtcol=proto, host=conf["address"],
port=conf["port"])
# Optional client certificate check
#if conf["transport"] == "pyrossl":
# server.setNewConnectionValidator(Pyro.protocol.BasicSSLValidator())
print("Listening on '%(address)s:%(port)d..." % conf)
# Skip name server and bind wrap object to 'all' with method x.
# client must open proxy to URI/all to enable use of proxy.x()
server.connectPersistent(AllWrap(), "all")
server.requestLoop()
else:
print("unknown transport: %(transport)s" % conf)
sys.exit(1)
if __name__ == '__main__':
conf = default_configuration()
# Parse command line
try:
(opts, args) = getopt.getopt(sys.argv[1:],
'a:hp:t:', [
'address=',
'help',
'port=',
'transport=',
])
except getopt.GetoptError as err:
print('Error in option parsing: ' + err.msg)
usage()
sys.exit(1)
for (opt, val) in opts:
if opt in ('-h', '--help'):
usage()
sys.exit(0)
elif opt in ('-a', '--address'):
conf["address"] = val
elif opt in ('-p', '--port'):
try:
conf["port"] = int(val)
            except ValueError as err:
print('Error in parsing %s value: %s' % (opt, err))
sys.exit(1)
elif opt in ('-t', '--transport'):
conf["transport"] = val
else:
print("unknown option: %s" % opt)
usage()
sys.exit(1)
main(conf)
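# --- Illustrative sketch (not part of the original file) ---
# A minimal client for exercising the xmlrpc transport above; the URL and
# call count are assumptions (the project's real benchmark client lives in a
# separate file). xmlrpclib is the Python 2 standard-library client matching
# this code base.
#
# import time
# import xmlrpclib
#
# proxy = xmlrpclib.ServerProxy('http://localhost:8001')
# start = time.time()
# for _ in range(1000):
#     proxy.x()  # invoke the registered dummy function
# print('1000 calls in %.3fs' % (time.time() - start))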
|
heromod/migrid
|
mig/grsfs-fuse/benchmarks/code/rpcserver.py
|
Python
|
gpl-2.0
| 4,822
|
[
"Brian"
] |
2391b82c76a4d956ea26f7b90be9c69593db0e54644a09f0bf7fc42fe2b6d2e8
|
from ase.lattice import bulk
from gpaw import GPAW
from gpaw.wavefunctions.pw import PW
# Plane wave cutoff
pwcutoff = 600.0
# NxNxN k-point sampling, gamma-centred grid
k = 4
# Si lattice constant
alat = 5.421
# bulk calculation
bulk_crystal = bulk('Si', 'diamond', a=alat)
bulk_calc = GPAW(
mode = PW(pwcutoff),
kpts={'size': (k, k, k), 'gamma': True},
dtype=complex,
xc='PBE',
txt='si.rpa.pbe_output.txt',
parallel={'band':1}
)
bulk_crystal.set_calculator(bulk_calc)
e0_bulk_pbe = bulk_crystal.get_potential_energy()
# Now we have the density, but only the occupied states;
# at this point we diagonalise the full Hamiltonian to get
# the empty states as well (expensive)
bulk_calc.diagonalize_full_hamiltonian(nbands=200)
# the 'all' ensures we write wavefunctions too
bulk_calc.write('bulk.gpw',mode='all')
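# --- Illustrative sketch (not part of the original script) ---
# The follow-up RPA-correlation step that would consume 'bulk.gpw'; the
# cutoff list and the output file name are assumptions. RPACorrelation is
# GPAW's entry point for RPA correlation energies.
#
# from gpaw.xc.rpa import RPACorrelation
#
# rpa = RPACorrelation('bulk.gpw', txt='si.rpa_output.txt')
# E_rpa = rpa.calculate(ecut=[200.0])  # one correlation energy per cutoff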
|
robwarm/gpaw-symm
|
doc/exercises/rpa/si.rpa_init_pbe.py
|
Python
|
gpl-3.0
| 907
|
[
"ASE",
"GPAW"
] |
562c6a47c874efd3734b07fb063e81645ce737570598c4457a5314ef99cdd22b
|
#!/usr/bin/env python2
from netCDF4 import Dataset
import numpy
from geopy.distance import vincenty
from wrfpy.config import config
from wrfpy import utils
from wrfpy.readObsTemperature import readObsTemperature
import os
from datetime import datetime
import operator
from numpy import unravel_index
from numpy import shape as npshape
import glob
import statsmodels.api as sm
import csv
import numpy as np
import f90nml
from scipy import interpolate
from astropy.convolution import convolve
def return_float_int(value):
try:
return int(value.strip(','))
except ValueError:
return float(value.strip(','))
def convert_to_number(values):
    '''convert a list of strings to int/float, unpacking single values'''
    if len(values) == 0:
        return values
    elif len(values) == 1:
        return return_float_int(values[0])
    else:
        return [return_float_int(value) for value in values]
def reg_m(y, x):
ones = numpy.ones(len(x[0]))
X = sm.add_constant(numpy.column_stack((x[0], ones)))
for ele in x[1:]:
X = sm.add_constant(numpy.column_stack((ele, X)))
results = sm.OLS(y, X).fit()
return results
def find_gridpoint(lat_in, lon_in, lat, lon):
'''
lat_in, lon_in: lat/lon coordinate of point of interest
lat, lon: grid of lat/lon to find closest index of gridpoint
'''
# extract window surrounding point
lon_window = lon[(lon >= lon_in - 0.10) &
(lon <= lon_in + 0.10) &
(lat >= lat_in - 0.10) &
(lat <= lat_in + 0.10)]
lat_window = lat[(lon >= lon_in - 0.10) &
(lon <= lon_in + 0.10) &
(lat >= lat_in - 0.10) &
(lat <= lat_in + 0.10)]
lonx = lon_window
latx = lat_window
# calculate distance to each point in the surrounding window
distance = [vincenty((lat_in, lon_in), (latx[idx], lonx[idx])).km
for idx in range(0, len(lonx))]
# find index of closest reference station to wunderground station
try:
min_index, min_value = min(enumerate(distance),
key=operator.itemgetter(1))
lat_sel = latx[min_index]
# indices of gridpoint
latidx = lat.reshape(-1).tolist().index(lat_sel)
(lat_idx, lon_idx) = unravel_index(latidx, npshape(lon))
return lat_idx, lon_idx
except ValueError:
return None, None
class urbparm(config):
def __init__(self, dtobj, infile):
config.__init__(self)
if self.config['options_urbantemps']['ah.csv']:
ahcsv = self.config['options_urbantemps']['ah.csv']
self.read_ah_csv(ahcsv, dtobj)
self.options = self.read_tbl(infile)
self.change_AH()
self.write_tbl()
def read_ah_csv(self, ahcsv, dtobj):
'''
read anthropogenic heat from csv file
columns are: yr, month, ah, alh
alh column is optional
'''
# initialize variables in csv file
yr = []
mnth = []
ah = []
alh = [] # optional
# start reading csv file
with open(ahcsv, 'r') as inp:
reader = csv.reader(inp)
next(reader) # skip header
for row in reader:
# append variables
yr.append(int(row[0]))
mnth.append(int(row[1]))
ah.append(float(row[2]))
try:
alh.append(float(row[3]))
except IndexError:
alh.append(None)
yr = np.array(yr)
mnth = np.array(mnth)
ah = np.array(ah)
alh = np.array(alh)
self.ah = ah[(yr == dtobj.year) & (mnth == dtobj.month)][0]
if not float(self.ah) > 0:
self.ah = None
self.alh = alh[(yr == dtobj.year) & (mnth == dtobj.month)][0]
if not float(self.alh) > 0:
self.alh = None
@staticmethod
def read_tbl(tblfile):
'''
Read URBPARM.TBL
'''
COMMENT_CHAR = '#'
OPTION_CHAR = ':'
# process GEOGRID.TBL
options = {}
with open(tblfile) as openfileobject:
for line in openfileobject:
# First, remove comments:
if COMMENT_CHAR in line:
# split on comment char, keep only the part before
line, comment = line.split(COMMENT_CHAR, 1)
# Second, find lines with an option=value:
if OPTION_CHAR in line:
# split on option char:
option, value = line.split(OPTION_CHAR, 1)
# strip spaces:
option = option.strip()
value = convert_to_number(value.strip().split())
# store in dictionary:
options[option] = value
return options
def write_tbl(self):
'''
Write URBPARM.TBL to wrf run directory
'''
outfile = os.path.join(self.config['filesystem']['wrf_run_dir'],
'URBPARM.TBL')
# remove outfile if exists
utils.silentremove(outfile)
# write new outfile
        outf = open(outfile, 'w')
        space_sep = ['HSEQUIP', 'AHDIUPRF', 'ALHDIUPRF']
        for key in self.options.keys():
            if key not in ['STREET PARAMETERS', 'BUILDING HEIGHTS']:
                try:
                    if key not in space_sep:
                        outf.write("{0} : {1}\n".format
                                   (key, ", ".join(str(x)
                                    for x in self.options.get(key))))
                    else:
                        outf.write("{0} : {1}\n".format
                                   (key, " ".join(str(x)
                                    for x in self.options.get(key))))
                except TypeError:
                    outf.write("{0} : {1}\n".format
                               (key, self.options.get(key)))
        outf.close()
def change_AH(self):
'''
Modify anthropogenic heat with ones in csv file
'''
if self.ah:
self.options['AH'][-1] = self.ah
if self.alh:
self.options['ALH'][-1] = self.alh
class bumpskin(config):
def __init__(self, filename, nstationtypes=None, dstationtypes=None):
config.__init__(self)
# optional define station types to be used
self.nstationtypes = nstationtypes # stationtypes at night
self.dstationtypes = dstationtypes # stationtypes during daytime
self.wrfda_workdir = os.path.join(self.config
['filesystem']['work_dir'], 'wrfda')
self.wrf_rundir = self.config['filesystem']['work_dir']
# verify input
self.verify_input(filename)
# get number of domains
wrf_nml = f90nml.read(self.config['options_wrf']['namelist.input'])
ndoms = wrf_nml['domains']['max_dom']
# check if ndoms is an integer and >0
if not (isinstance(ndoms, int) and ndoms > 0):
raise ValueError("'domains_max_dom' namelist variable should be an"
" integer>0")
try:
(lat, lon, diffT) = self.findDiffT(1)
for domain in range(1, ndoms+1):
self.applyToGrid(lat, lon, diffT, domain)
except TypeError:
pass
def verify_input(self, filename):
'''
verify input and create list of files
'''
try:
f = Dataset(filename, 'r')
f.close()
self.filelist = [filename]
except IOError:
# file is not a netcdf file, assuming a txt file containing a
# list of netcdf files
if os.path.isdir(filename):
# path is actually a directory, not a file
self.filelist = glob.glob(os.path.join(filename, '*nc'))
else:
# re-raise error
raise
def get_time(self, wrfinput):
'''
get time from wrfinput file
'''
wrfinput = Dataset(wrfinput, 'r') # open netcdf file
# get datetime string from wrfinput file
datestr = ''.join(wrfinput.variables['Times'][0])
# convert to datetime object
dtobj = datetime.strptime(datestr, '%Y-%m-%d_%H:%M:%S')
wrfinput.close() # close netcdf file
return dtobj, datestr
@staticmethod
def getCoords(wrfinput):
'''
Return XLAT,XLONG coordinates from wrfinput file
'''
wrfinput = Dataset(wrfinput, 'r') # open netcdf file
lat = wrfinput.variables['XLAT'][0, :]
lon = wrfinput.variables['XLONG'][0, :]
lu_ind = wrfinput.variables['LU_INDEX'][0, :]
wrfinput.close()
return (lat, lon, lu_ind)
@staticmethod
def clean_2m_temp(T2, LU_INDEX, iswater, filter=True):
'''
Cleanup large spatial 2m temperature fluctuations
'''
if filter:
            # set water points to NaN (LU_INDEX is already the 2-d slice)
            t2 = T2
            t2[LU_INDEX == iswater] = np.nan
# convolution kernel
kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
# apply convolution kernel
T2_filtered = convolve(t2[:], kernel,
nan_treatment='interpolate',
preserve_nan=True)
# handle domain edges
T2_filtered[0, :] = T2[0, :]
T2_filtered[-1, :] = T2[-1, :]
T2_filtered[:, 0] = T2[:, 0]
T2_filtered[:, -1] = T2[:, -1]
# difference between filtered and original
diff = np.abs(T2_filtered - T2)
# replace points with large difference
# compared to neighboring points
T2[diff > 3] = T2_filtered[diff > 3]
            n_changed = len(T2[diff > 3])
            print('Total points changed in T2 field: ' + str(n_changed))
            if n_changed > 0:
                print('Average increment: ' +
                      str(np.sum(diff[diff > 3]) / n_changed))
return T2
def get_urban_temp(self, wrfinput, ams):
'''
get urban temperature
'''
wrfinput = Dataset(wrfinput, 'r') # open netcdf file
# get datetime string from wrfinput file
LU_IND = wrfinput.variables['LU_INDEX'][0, :]
iswater = wrfinput.getncattr('ISWATER')
GLW_IND = wrfinput.variables['GLW'][0, :]
U10_IND = wrfinput.variables['U10'][0, :]
V10_IND = wrfinput.variables['V10'][0, :]
UV10_IND = numpy.sqrt(U10_IND**2 + V10_IND**2)
lat = wrfinput.variables['XLAT'][0, :]
lon = wrfinput.variables['XLONG'][0, :]
T2_IND = wrfinput.variables['T2'][0, :]
T2_IND = self.clean_2m_temp(T2_IND, LU_IND,
iswater, filter=True)
T2 = []
U10 = []
V10 = []
GLW = []
LU = []
for point in ams:
i_idx, j_idx = find_gridpoint(point[0], point[1], lat, lon)
if (i_idx and j_idx):
T2.append(T2_IND[i_idx, j_idx])
U10.append(wrfinput.variables['U10'][0, i_idx, j_idx])
V10.append(wrfinput.variables['V10'][0, i_idx, j_idx])
GLW.append(wrfinput.variables['GLW'][0, i_idx, j_idx])
LU.append(wrfinput.variables['LU_INDEX'][0, i_idx, j_idx])
else:
T2.append(numpy.nan)
U10.append(numpy.nan)
V10.append(numpy.nan)
GLW.append(numpy.nan)
LU.append(numpy.nan)
wrfinput.close()
UV10 = numpy.sqrt(numpy.array(U10)**2 + numpy.array(V10)**2)
return (T2, numpy.array(GLW), UV10, numpy.array(LU), LU_IND,
GLW_IND, UV10_IND)
def findDiffT(self, domain):
'''
calculate increment of urban temperatures and apply increment
to wrfinput file in wrfda directory
'''
# load netcdf files
wrfda_workdir = os.path.join(self.wrfda_workdir, "d0" + str(domain))
wrfinput = os.path.join(wrfda_workdir, 'wrfvar_output')
# get datetime from wrfinput file
dtobj, datestr = self.get_time(wrfinput)
# get observed temperatures
        obs = readObsTemperature(dtobj, nstationtypes=self.nstationtypes,
                                 dstationtypes=self.dstationtypes).obs
obs_temp = [obs[idx][2] for idx in range(0, len(obs))]
# get modeled temperatures at location of observation stations
t_urb, glw, uv10, lu, LU_IND, glw_IND, uv10_IND = self.get_urban_temp(
wrfinput, obs)
lat, lon, lu_ind = self.getCoords(wrfinput) # get coordinates
diffT_station = numpy.array(obs_temp) - numpy.array(t_urb)
        # calculate median and standard deviation, ignore outliers >= 5K
        # only consider landuse class 1
nanmask = ((~numpy.isnan(diffT_station)) & (lu == 1) &
(abs(diffT_station) < 5))
obs = numpy.array(obs)
obs = obs[nanmask]
diffT_station = diffT_station[nanmask]
lu = lu[nanmask]
glw = glw[nanmask]
uv10 = uv10[nanmask]
median = numpy.nanmedian(diffT_station[(abs(diffT_station) < 5)])
std = numpy.nanstd(diffT_station[(abs(diffT_station) < 5)])
print('print diffT station')
print(diffT_station[(abs(diffT_station) < 5)])
print('end print diffT station')
# depending on the number of observations, calculate the temperature
# increment differently
if (len(lu) < 3):
# no temperature increment for <3 observations
print('No temperature increment applied, not enough data.')
diffT = numpy.zeros(numpy.shape(glw_IND))
elif ((len(lu) >= 3) & (len(lu) < 5)):
# use median if between 3 and 5 observations
print('Median temperature increment applied: ' + str(median))
diffT = median * numpy.ones(numpy.shape(glw_IND))
diffT[LU_IND != 1] = 0
else:
# fit statistical model
# define mask
mask = ((diffT_station > median - 2*std) &
(diffT_station < median + 2*std) &
(lu == 1) & (abs(diffT_station) < 5))
# filter obs
obs = obs[mask]
# recalculate median
median = numpy.nanmedian(diffT_station[mask])
fit = reg_m(diffT_station[mask], [(glw)[mask], uv10[mask]])
# calculate diffT for every gridpoint
            if fit.f_pvalue <= 0.1:  # use fit if significant
                print('Temperature increment applied from statistical '
                      'fit with values: ' + str(fit.params))
diffT = (fit.params[1] * glw_IND +
fit.params[0] * uv10_IND + fit.params[2])
else: # use median
print('Median temperature increment applied: ' + str(median))
diffT = median * numpy.ones(numpy.shape(glw_IND))
diffT[LU_IND != 1] = 0 # set to 0 if LU_IND!=1
return (lat, lon, diffT)
def applyToGrid(self, lat, lon, diffT, domain):
# load netcdf files
wrfda_workdir = os.path.join(self.wrfda_workdir, "d0" + str(domain))
wrfinputFile = os.path.join(wrfda_workdir, 'wrfvar_output')
lat2, lon2, lu_ind2 = self.getCoords(wrfinputFile)
# get datetime from wrfinput file
dtobj, datestr = self.get_time(wrfinputFile)
# if not ((lat==lat2) and (lon==lon2)) we need to interpolate
if not (np.array_equal(lat, lat2) and np.array_equal(lon, lon2)):
# do interpolation to get new diffT
diffT = interpolate.griddata(
(lon.reshape(-1), lat.reshape(-1)), diffT.reshape(-1),
(lon2.reshape(-1), lat2.reshape(-1)),
method='cubic').reshape(np.shape(lon2))
diffT[lu_ind2 != 1] = 0 # set to 0 if LU_IND!=1
# open wrfvar_output (output after data assimilation)
self.wrfinput2 = Dataset(os.path.join(wrfda_workdir, 'wrfvar_output'),
'r+')
# open wrfvar_input (input before DA (last step previous run)
start_date = utils.return_validate(
self.config['options_general']['date_start'])
        if (dtobj == start_date):  # very first timestep
            self.wrfinput3 = Dataset(os.path.join
                                     (self.wrf_rundir,
                                      ('wrfinput_d0' + str(domain))), 'r')
            # nothing to increment at the very first timestep; close files
            self.wrfinput2.close()
            self.wrfinput3.close()
            return
else:
self.wrfinput3 = Dataset(os.path.join
(self.wrf_rundir,
('wrfvar_input_d0' + str(domain) +
'_' + datestr)), 'r')
# define variables to increment
# variables_2d = ['TC_URB', 'TR_URB', 'TB_URB', 'TG_URB', 'TS_URB']
# variables_3d = ['TRL_URB', 'TBL_URB', 'TGL_URB', 'TSLB']
# begin determining multiplying factor
        rhocp = 1231  # volumetric heat capacity of air, rho*cp [J m-3 K-1]
uc_urb = self.wrfinput2.variables['UC_URB'][:]
lp_urb = self.wrfinput2.variables['BUILD_AREA_FRACTION'][:]
hgt_urb = self.wrfinput2.variables['BUILD_HEIGHT'][:]
lb_urb = self.wrfinput2.variables['BUILD_SURF_RATIO'][:]
frc_urb = self.wrfinput2.variables['FRC_URB2D'][:]
chc_urb = self.wrfinput2.variables['CHC_SFCDIF'][:]
R = numpy.maximum(numpy.minimum(lp_urb/frc_urb, 0.9), 0.1)
RW = 1.0 - R
HNORM = 2. * hgt_urb * frc_urb / (lb_urb - lp_urb)
HNORM[lb_urb <= lp_urb] = 10.0
ZR = numpy.maximum(numpy.minimum(hgt_urb, 100.0), 3.0)
h = ZR / HNORM
W = 2 * h
# set safety margin on W/RW >=8 or else SLUCM could misbehave
# make sure to use the same safety margin in module_sf_urban.F
W[(W / RW) > 8.0] = ((8.0 / (W / RW)) * W)[(W / RW) > 8.0]
CW = numpy.zeros(numpy.shape(uc_urb))
CW[uc_urb > 5] = 7.51 * uc_urb[uc_urb > 5]**0.78
CW[uc_urb <= 5] = 6.15 + 4.18 * uc_urb[uc_urb <= 5]
DTW = diffT * (1 + ((RW * rhocp) / (W + RW)) * (chc_urb/CW))
diffT = DTW # change 09/01/2018
diffT = numpy.nan_to_num(diffT) # replace nan by 0
# apply temperature changes
TSK = self.wrfinput2.variables['TSK']
TSK[:] = TSK[:] + diffT
TB_URB = self.wrfinput2.variables['TB_URB']
TB_URB[:] = TB_URB[:] + diffT
TG_URB = self.wrfinput2.variables['TG_URB']
TG_URB[:] = TG_URB[:] + diffT
TS_URB = self.wrfinput2.variables['TS_URB']
TS_URB[:] = TS_URB[:] + diffT
TGR_URB = self.wrfinput2.variables['TGR_URB']
TGR_URB[:] = TGR_URB[:] + diffT
# wall layer temperature
try:
TBL_URB_factors = self.config['options_urbantemps']['TBL_URB']
except KeyError:
# fallback values if none are defined in config
# these may not work correctly for other cities than Amsterdam
TBL_URB_factors = [0.823, 0.558, 0.379, 0.257]
if not (isinstance(TBL_URB_factors, list) and
len(TBL_URB_factors) > 1):
TBL_URB_factors = [0.823, 0.558, 0.379, 0.257]
        TBL_URB = self.wrfinput2.variables['TBL_URB']
        levs = numpy.shape(TBL_URB[:])[1]
for lev in range(0, levs):
try:
TBL_URB[0, lev, :] = (TBL_URB[0, lev, :] +
diffT * float(TBL_URB_factors[lev]))
except IndexError:
# no factor for this layer => no increment
pass
# road layer temperature
try:
TGL_URB_factors = self.config['options_urbantemps']['TGL_URB']
except KeyError:
# fallback values if none are defined in config
# these may not work correctly for other cities than Amsterdam
TGL_URB_factors = [0.776, 0.170, 0.004]
if not (isinstance(TGL_URB_factors, list) and
len(TGL_URB_factors) > 1):
TGL_URB_factors = [0.776, 0.170, 0.004]
        TGL_URB = self.wrfinput2.variables['TGL_URB']
        levs = numpy.shape(TGL_URB[:])[1]
for lev in range(0, levs):
try:
TGL_URB[0, lev, :] = (TGL_URB[0, lev, :] +
diffT * float(TGL_URB_factors[lev]))
except IndexError:
# no factor for this layer => no increment
pass
# adjustment soil for vegetation fraction urban cell
try:
TSLB_factors = self.config['options_urbantemps']['TSLB']
except KeyError:
# fallback values if none are defined in config
# these may not work correctly for other cities than Amsterdam
TSLB_factors = [0.507, 0.009]
if not (isinstance(TSLB_factors, list) and
len(TSLB_factors) > 1):
TSLB_factors = [0.507, 0.009]
TSLB = self.wrfinput2.variables['TSLB'] # after update_lsm
TSLB_in = self.wrfinput3.variables['TSLB'] # before update_lsm
levs = numpy.shape(self.wrfinput2.variables['TSLB'][:])[1]
for lev in range(0, levs):
# reset TSLB for urban cells to value before update_lsm
TSLB[0, lev, :][lu_ind2 == 1] = TSLB_in[0, lev, :][lu_ind2 == 1]
try:
TSLB[0, lev, :] = (TSLB[0, lev, :] +
diffT * float(TSLB_factors[lev]))
except IndexError:
pass
# close netcdf file
self.wrfinput2.close()
self.wrfinput3.close()
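# --- Illustrative sketch (not part of the original module) ---
# Minimal usage, assuming hypothetical paths; work directories, namelists and
# station types are read from the wrfpy configuration.
#
# from wrfpy.bumpskin import bumpskin
#
# bumpskin('/path/to/observations.nc')   # a single netCDF file ...
# bumpskin('/path/to/obs_directory')     # ... or a directory of *nc files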
|
ERA-URBAN/wrfpy
|
wrfpy/bumpskin.py
|
Python
|
apache-2.0
| 21,936
|
[
"NetCDF"
] |
8ce0145d1d76634412b883b8a36d92a95f3024119ee31208a36d9d79d99fbccf
|
import ply.lex as lex
import ply.yacc as yacc
import lan
import rewriter
from codegen import cgen
from host import boilerplategen, kernelgen
from processing import collect_id as ci
from transformation import define_arguments as darg
from transformation import stencil
from transformation import transpose as tp
fileprefix = "../test/C/"
SetNoReadBack = False
DoOptimizations = True
IsDebug = True
def __get_ast_from_file(foldername, filename):
cparser = yacc.yacc(module=lan)
lex.lex(module=lan)
fullfilename = fileprefix + foldername + '/' + filename
    try:
        f = open(fullfilename, 'r')
        s = f.read()
        f.close()
    except IOError:
        print('file %s wasn\'t found' % fullfilename)
        raise
lex.input(s)
while 1:
tok = lex.token()
if not tok: break
## print tok
ast = cparser.parse(s)
return ast
def __get_baseform_name(name):
return fileprefix + name + '/' + __get_baseform_filename(name)
def __get_baseform_filename(name):
return 'baseform_' + name.lower() + '.cpp'
def _create_baseform(name):
ast = __get_ast_from_init(name)
cprint = cgen.CGenerator()
baseform_filename = __get_baseform_name(name)
cprint.write_ast_to_file(ast, filename=baseform_filename)
def __get_ast_from_init(name):
ast = __get_ast_from_file(name, name + 'For.cpp')
ast.ext.append(lan.ProgramName(name))
ast.ext.append(lan.RunOCLArg(lan.TypeId(['std::string'], lan.Id('ocl_type'))))
rw = rewriter.Rewriter()
rw.rewrite_to_baseform(ast, name + 'For')
return ast
def __get_ast_from_base(name):
ast = __get_ast_from_file(name, __get_baseform_filename(name))
return ast
def gen_full_code(ast):
kgen = kernelgen.KernelGen(ast, fileprefix)
cprint = cgen.CGenerator()
kgen.generate_kernels()
boilerplate = boilerplategen.Boilerplate(ast, SetNoReadBack, IsDebug)
boilerast = boilerplate.generate_code()
name = ci.get_program_name(ast)
cprint.write_ast_to_file(boilerast, filename=fileprefix + name + '/' + 'boilerplate.cpp')
def matmul():
name = 'MatMul'
if True:
ast = __get_ast_from_init(name)
else:
ast = __get_ast_from_base(name)
__optimize(ast)
def __optimize(ast, par_dim=None):
ast.ext.append(lan.ParDim(par_dim))
name = ci.get_program_name(ast)
if DoOptimizations:
__main_transpose(ast)
if name == 'Jacobi':
__main_stencil(ast)
__main_definearg(ast)
gen_full_code(ast)
def knearest():
name = 'KNearest'
if True:
ast = __get_ast_from_init(name)
else:
ast = __get_ast_from_base(name)
__optimize(ast, par_dim=1)
def jacobi():
name = 'Jacobi'
if True:
ast = __get_ast_from_init(name)
else:
ast = __get_ast_from_base(name)
__optimize(ast)
def nbody():
name = 'NBody'
if True:
ast = __get_ast_from_init(name)
else:
ast = __get_ast_from_base(name)
__optimize(ast)
def laplace():
name = 'Laplace'
if True:
ast = __get_ast_from_init(name)
else:
ast = __get_ast_from_base(name)
__optimize(ast, par_dim=1)
def gaussian():
name = 'GaussianDerivates'
if True:
ast = __get_ast_from_init(name)
else:
ast = __get_ast_from_base(name)
__optimize(ast)
def __main_transpose(ast):
tps = tp.Transpose(ast)
tps.transpose()
def __main_definearg(ast):
dargs = darg.DefineArguments(ast)
dargs.define_arguments()
def __main_stencil(ast):
sten = stencil.Stencil(ast)
sten.stencil(['X1'], west=1, north=1, east=1, south=1, middle=0)
if __name__ == "__main__":
matmul()
knearest()
jacobi()
nbody()
laplace()
gaussian()
|
dikujepsen/OpenTran
|
src/framework/main.py
|
Python
|
mit
| 3,760
|
[
"Gaussian"
] |
28c8ba94140a3321c66235a1e35a9a9f7cba2bbd8b4f2ca7f30f328b654e8173
|
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
from pgmpy.factors.continuous import ContinuousFactor
from pgmpy.factors.continuous import JointGaussianDistribution
class CanonicalFactor(ContinuousFactor):
u"""
The intermediate factors in a Gaussian network can be described
compactly using a simple parametric representation called the
canonical form. This representation is closed under the basic
operations used in inference: factor product, factor division,
factor reduction, and marginalization. Thus, we define this
CanonicalFactor class that allows the inference process to be
performed on joint Gaussian networks.
A canonical form C (X; K,h,g) is defined as
C (X; K,h,g) = exp( ((-1/2) * X.T * K * X) + (h.T * X) + g)
Reference
---------
Probabilistic Graphical Models, Principles and Techniques,
Daphne Koller and Nir Friedman, Section 14.2, Chapter 14.
"""
def __init__(self, variables, K, h, g):
"""
Parameters
----------
variables: list or array-like
            The variables for which the distribution is defined.
K: n x n, 2-d array-like
h : n x 1, array-like
g : int, float
        The terms K, h and g are the defining parameters of the
        canonical factor representation.
Examples
--------
>>> from pgmpy.factors.continuous import CanonicalFactor
>>> phi = CanonicalFactor(['X', 'Y'], np.array([[1, -1], [-1, 1]]),
np.array([[1], [-1]]), -3)
>>> phi.variables
['X', 'Y']
>>> phi.K
array([[1, -1],
[-1, 1]])
>>> phi.h
array([[1],
[-1]])
>>> phi.g
-3
"""
no_of_var = len(variables)
if len(h) != no_of_var:
raise ValueError("Length of h parameter vector must be equal to the"
"number of variables.")
self.h = np.asarray(np.reshape(h, (no_of_var, 1)), dtype=float)
self.g = g
self.K = np.asarray(K, dtype=float)
if self.K.shape != (no_of_var, no_of_var):
raise ValueError("The K matrix should be a square matrix with order equal to"
"the number of variables. Got: {got_shape}, Expected: {exp_shape}".format
(got_shape=self.K.shape, exp_shape=(no_of_var, no_of_var)))
super(CanonicalFactor, self).__init__(variables, None)
@property
def pdf(self):
def fun(*args):
x = np.array(args)
return np.exp(self.g + np.dot(x, self.h)[0] - 0.5 * np.dot(x.T, np.dot(self.K, x)))
return fun
def copy(self):
"""
Makes a copy of the factor.
Returns
-------
CanonicalFactor object: Copy of the factor
Examples
--------
>>> from pgmpy.factors.continuous import CanonicalFactor
>>> phi = CanonicalFactor(['X', 'Y'], np.array([[1, -1], [-1, 1]]),
np.array([[1], [-1]]), -3)
>>> phi.variables
['X', 'Y']
>>> phi.K
array([[1, -1],
[-1, 1]])
>>> phi.h
array([[1],
[-1]])
>>> phi.g
-3
>>> phi2 = phi.copy()
>>> phi2.variables
['X', 'Y']
>>> phi2.K
array([[1, -1],
[-1, 1]])
>>> phi2.h
array([[1],
[-1]])
>>> phi2.g
-3
"""
copy_factor = CanonicalFactor(self.scope(), self.K.copy(),
self.h.copy(), self.g)
return copy_factor
def to_joint_gaussian(self):
"""
Return an equivalent Joint Gaussian Distribution.
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.continuous import CanonicalFactor
>>> phi = CanonicalFactor(['x1', 'x2'], np.array([[3, -2], [-2, 4]]),
np.array([[5], [-1]]), 1)
>>> jgd = phi.to_joint_gaussian()
>>> jgd.variables
['x1', 'x2']
>>> jgd.covariance
array([[ 0.5 , 0.25 ],
[ 0.25 , 0.375]])
>>> jgd.mean
array([[ 2.25 ],
[ 0.875]])
"""
covariance = np.linalg.inv(self.K)
mean = np.dot(covariance, self.h)
return JointGaussianDistribution(self.scope(), mean, covariance)
def reduce(self, values, inplace=True):
"""
Reduces the distribution to the context of the given variable values.
Let C(X,Y ; K, h, g) be some canonical form over X,Y where,
k = [[K_XX, K_XY], ; h = [[h_X],
[K_YX, K_YY]] [h_Y]]
The formula for the obtained conditional distribution for setting
Y = y is given by,
.. math:: K' = K_{XX}
.. math:: h' = h_X - K_{XY} * y
.. math:: g' = g + {h^T}_Y * y - 0.5 * y^T * K_{YY} * y
Parameters
----------
values: list, array-like
A list of tuples of the form (variable name, variable value).
inplace: boolean
If inplace=True it will modify the factor itself, else would return
            a new CanonicalFactor object.
Returns
-------
CanonicalFactor or None:
if inplace=True (default) returns None
if inplace=False returns a new CanonicalFactor instance.
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.continuous import CanonicalFactor
>>> phi = CanonicalFactor(['X1', 'X2', 'X3'],
... np.array([[1, -1, 0], [-1, 4, -2], [0, -2, 4]]),
... np.array([[1], [4], [-1]]), -2)
>>> phi.variables
['X1', 'X2', 'X3']
>>> phi.K
array([[ 1., -1.],
[-1., 3.]])
>>> phi.h
array([[ 1. ],
[ 3.5]])
>>> phi.g
-2
>>> phi.reduce([('X3', 0.25)])
>>> phi.variables
['X1', 'X2']
>>> phi.K
array([[ 1, -1],
[-1, 4]])
>>> phi.h
array([[ 1. ],
[ 4.5]])
>>> phi.g
-2.375
"""
if not isinstance(values, (list, tuple, np.ndarray)):
raise TypeError("variables: Expected type list or array-like, "
"got type {var_type}".format(var_type=type(values)))
if not all([var in self.variables for var, value in values]):
raise ValueError("Variable not in scope.")
phi = self if inplace else self.copy()
var_to_reduce = [var for var, value in values]
# index_to_keep -> j vector
index_to_keep = [self.variables.index(var) for var in self.variables
if var not in var_to_reduce]
# index_to_reduce -> i vector
index_to_reduce = [self.variables.index(var) for var in var_to_reduce]
K_i_i = self.K[np.ix_(index_to_keep, index_to_keep)]
K_i_j = self.K[np.ix_(index_to_keep, index_to_reduce)]
K_j_j = self.K[np.ix_(index_to_reduce, index_to_reduce)]
h_i = self.h[index_to_keep]
h_j = self.h[index_to_reduce]
# The values for the reduced variables.
y = np.array([value for index,
value in sorted([(self.variables.index(var), value) for var,
value in values])]).reshape(len(index_to_reduce), 1)
phi.variables = [self.variables[index] for index in index_to_keep]
phi.K = K_i_i
phi.h = h_i - np.dot(K_i_j, y)
phi.g = self.g + (np.dot(h_j.T, y) - (0.5 * np.dot(np.dot(y.T, K_j_j), y)))[0][0]
if not inplace:
return phi
def marginalize(self, variables, inplace=True):
u"""
Modifies the factor with marginalized values.
Let C(X,Y ; K, h, g) be some canonical form over X,Y where,
k = [[K_XX, K_XY], ; h = [[h_X],
[K_YX, K_YY]] [h_Y]]
In this case, the result of the integration operation is a canonical
from C (K', h', g') given by,
.. math:: K' = K_{XX} - K_{XY} * {K^{-1}}_{YY} * K_YX
.. math:: h' = h_X - K_{XY} * {K^{-1}}_{YY} * h_Y
        .. math:: g' = g + 0.5 * (|Y| * log(2*pi) - log(|K_{YY}|) + {h^T}_Y * {K^{-1}}_{YY} * h_Y)
Parameters
----------
variables: list or array-like
List of variables over which to marginalize.
inplace: boolean
If inplace=True it will modify the distribution itself,
else would return a new distribution.
Returns
-------
CanonicalFactor or None :
if inplace=True (default) returns None
if inplace=False return a new CanonicalFactor instance
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.continuous import CanonicalFactor
>>> phi = CanonicalFactor(['X1', 'X2', 'X3'],
... np.array([[1, -1, 0], [-1, 4, -2], [0, -2, 4]]),
... np.array([[1], [4], [-1]]), -2)
>>> phi.K
array([[ 1, -1, 0],
[-1, 4, -2],
[ 0, -2, 4]])
>>> phi.h
array([[ 1],
[ 4],
[-1]])
>>> phi.g
-2
>>> phi.marginalize(['X3'])
>>> phi.K
array([[ 1., -1.],
[-1., 3.]])
>>> phi.h
array([[ 1. ],
[ 3.5]])
>>> phi.g
        -1.64920865
"""
if not isinstance(variables, (list, tuple, np.ndarray)):
raise TypeError("variables: Expected type list or array-like, "
"got type {var_type}".format(var_type=type(variables)))
if not all([var in self.variables for var in variables]):
raise ValueError("Variable not in scope.")
phi = self if inplace else self.copy()
# index_to_keep -> i vector
index_to_keep = [self.variables.index(var) for var in self.variables
if var not in variables]
# index_to_marginalize -> j vector
index_to_marginalize = [self.variables.index(var) for var in variables]
K_i_i = self.K[np.ix_(index_to_keep, index_to_keep)]
K_i_j = self.K[np.ix_(index_to_keep, index_to_marginalize)]
K_j_i = self.K[np.ix_(index_to_marginalize, index_to_keep)]
K_j_j = self.K[np.ix_(index_to_marginalize, index_to_marginalize)]
K_j_j_inv = np.linalg.inv(K_j_j)
h_i = self.h[index_to_keep]
h_j = self.h[index_to_marginalize]
phi.variables = [self.variables[index] for index in index_to_keep]
phi.K = K_i_i - np.dot(np.dot(K_i_j, K_j_j_inv), K_j_i)
phi.h = h_i - np.dot(np.dot(K_i_j, K_j_j_inv), h_j)
        phi.g = self.g + 0.5 * (len(variables) * np.log(2 * np.pi) -
                np.log(abs(np.linalg.det(K_j_j))) + np.dot(np.dot(h_j.T, K_j_j_inv), h_j))[0][0]
if not inplace:
return phi
def _operate(self, other, operation, inplace=True):
"""
Gives the CanonicalFactor operation (product or divide) with
the other factor.
The product of two canonical factors over the same scope
X is simply:
C(K1, h1, g1) * C(K2, h2, g2) = C(K1+K2, h1+h2, g1+g2)
The division of canonical forms is defined analogously:
C(K1, h1, g1) / C(K2, h2, g2) = C(K1-K2, h1-h2, g1- g2)
When we have two canonical factors over different scopes X and Y,
we simply extend the scope of both to make their scopes match and
then perform the operation of the above equation. The extension of
the scope is performed by simply adding zero entries to both the K
matrices and the h vectors.
Parameters
----------
other: CanonicalFactor
The CanonicalFactor to be multiplied.
operation: String
'product' for multiplication operation and
'divide' for division operation.
Returns
-------
CanonicalFactor or None:
if inplace=True (default) returns None
if inplace=False returns a new CanonicalFactor instance.
Example
-------
>>> import numpy as np
>>> from pgmpy.factors.continuous import CanonicalFactor
>>> phi1 = CanonicalFactor(['x1', 'x2', 'x3'],
np.array([[1, -1, 0], [-1, 4, -2], [0, -2, 4]]),
np.array([[1], [4], [-1]]), -2)
>>> phi2 = CanonicalFactor(['x1', 'x2'], np.array([[3, -2], [-2, 4]]),
np.array([[5], [-1]]), 1)
>>> phi3 = phi1 * phi2
>>> phi3.K
array([[ 4., -3., 0.],
[-3., 8., -2.],
[ 0., -2., 4.]])
>>> phi3.h
array([ 6., 3., -1.])
>>> phi3.g
-1
>>> phi4 = phi1 / phi2
>>> phi4.K
array([[-2., 1., 0.],
[ 1., 0., -2.],
[ 0., -2., 4.]])
>>> phi4.h
array([-4., 5., -1.])
>>> phi4.g
-3
"""
if not isinstance(other, CanonicalFactor):
raise TypeError("CanonicalFactor object can only be multiplied or divided with "
"an another CanonicalFactor object. Got {other_type}, expected "
"CanonicalFactor.".format(other_type=type(other)))
phi = self if inplace else self.copy()
all_vars = self.variables + [var for var in other.variables if var not in self.variables]
no_of_var = len(all_vars)
self_var_index = [all_vars.index(var) for var in self.variables]
other_var_index = [all_vars.index(var) for var in other.variables]
def _extend_K_scope(K, index):
ext_K = np.zeros([no_of_var, no_of_var])
ext_K[np.ix_(index, index)] = K
return ext_K
def _extend_h_scope(h, index):
ext_h = np.zeros(no_of_var).reshape(no_of_var, 1)
ext_h[index] = h
return ext_h
phi.variables = all_vars
if operation == 'product':
phi.K = _extend_K_scope(self.K, self_var_index) + _extend_K_scope(other.K, other_var_index)
phi.h = _extend_h_scope(self.h, self_var_index) + _extend_h_scope(other.h, other_var_index)
phi.g = self.g + other.g
else:
phi.K = _extend_K_scope(self.K, self_var_index) - _extend_K_scope(other.K, other_var_index)
phi.h = _extend_h_scope(self.h, self_var_index) - _extend_h_scope(other.h, other_var_index)
phi.g = self.g - other.g
if not inplace:
return phi
|
dungvtdev/upsbayescpm
|
pgmpy/factors/continuous/CanonicalFactor.py
|
Python
|
mit
| 15,198
|
[
"Gaussian"
] |
05d8750ae6ecbcc753e04a13aaee2eff28a5a0a4712b3a5cbff471633822a8ef
|
#!/usr/bin/env python
from __future__ import print_function
from vtk import *
if __name__ == "__main__":
""" Main entry point of this python script """
#
# Load our table from a CSV file (covered in table2.py)
#
csv_source = vtkDelimitedTextReader()
csv_source.SetFieldDelimiterCharacters(",")
csv_source.SetHaveHeaders(True)
csv_source.SetDetectNumericColumns(True)
csv_source.SetFileName("table_data.csv")
csv_source.Update()
T = csv_source.GetOutput()
print("Table loaded from CSV file:")
T.Dump(6)
#
# Add a new row to the table
#
new_row = [8, "Luis", 68]
for i in range( T.GetNumberOfColumns()):
T.GetColumn(i).InsertNextValue( new_row[i] )
print("\nTable with new row appended:")
T.Dump(6)
#
# Extract row 3 out of the table into a Python list.
#
row = []
row_number = 3
for icol in range( T.GetNumberOfColumns() ):
row.append( T.GetColumn(icol).GetValue( row_number ) )
print("\nExtracted row 3:")
print(row)
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Examples/Infovis/Python/tables3.py
|
Python
|
bsd-3-clause
| 1,093
|
[
"VTK"
] |
c9c0ed6b10dc793229561748e9a4fd03ae90f1cf68d67c90dfd0c600aaf22a65
|
import logging
import urllib
from functools import partial
from django.conf import settings
from django.core.context_processors import csrf
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect
from mitxmako.shortcuts import render_to_response, render_to_string
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from markupsafe import escape
from courseware import grades
from courseware.access import has_access
from courseware.courses import (get_courses, get_course_with_access,
get_courses_by_university, sort_by_announcement)
import courseware.tabs as tabs
from courseware.masquerade import setup_masquerade
from courseware.model_data import ModelDataCache
from .module_render import toc_for_course, get_module_for_descriptor, get_module
from courseware.models import StudentModule, StudentModuleHistory
from course_modes.models import CourseMode
from django_comment_client.utils import get_discussion_title
from student.models import UserTestGroup, CourseEnrollment
from util.cache import cache, cache_if_anonymous
from xmodule.modulestore import Location
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import InvalidLocationError, ItemNotFoundError, NoPathToItem
from xmodule.modulestore.search import path_to_location
from xmodule.course_module import CourseDescriptor
import comment_client
log = logging.getLogger("mitx.courseware")
template_imports = {'urllib': urllib}
def user_groups(user):
"""
TODO (vshnayder): This is not used. When we have a new plan for groups, adjust appropriately.
"""
if not user.is_authenticated():
return []
# TODO: Rewrite in Django
key = 'user_group_names_{user.id}'.format(user=user)
cache_expiration = 60 * 60 # one hour
# Kill caching on dev machines -- we switch groups a lot
group_names = cache.get(key)
if settings.DEBUG:
group_names = None
if group_names is None:
group_names = [u.name for u in UserTestGroup.objects.filter(users=user)]
cache.set(key, group_names, cache_expiration)
return group_names
@ensure_csrf_cookie
@cache_if_anonymous
def courses(request):
"""
Render "find courses" page. The course selection work is done in courseware.courses.
"""
courses = get_courses(request.user, request.META.get('HTTP_HOST'))
courses = sort_by_announcement(courses)
return render_to_response("courseware/courses.html", {'courses': courses})
def render_accordion(request, course, chapter, section, model_data_cache):
"""
Draws navigation bar. Takes current position in accordion as
parameter.
If chapter and section are '' or None, renders a default accordion.
course, chapter, and section are the url_names.
Returns the html string
"""
# grab the table of contents
user = User.objects.prefetch_related("groups").get(id=request.user.id)
request.user = user # keep just one instance of User
toc = toc_for_course(user, request, course, chapter, section, model_data_cache)
context = dict([('toc', toc),
('course_id', course.id),
('csrf', csrf(request)['csrf_token']),
('show_timezone', course.show_timezone)] + template_imports.items())
return render_to_string('courseware/accordion.html', context)
def get_current_child(xmodule):
"""
Get the xmodule.position's display item of an xmodule that has a position and
children. If xmodule has no position or is out of bounds, return the first child.
Returns None only if there are no children at all.
"""
if not hasattr(xmodule, 'position'):
return None
if xmodule.position is None:
pos = 0
else:
# position is 1-indexed.
pos = xmodule.position - 1
children = xmodule.get_display_items()
if 0 <= pos < len(children):
child = children[pos]
elif len(children) > 0:
# Something is wrong. Default to first child
child = children[0]
else:
child = None
return child
def redirect_to_course_position(course_module):
"""
Return a redirect to the user's current place in the course.
If this is the user's first time, redirects to COURSE/CHAPTER/SECTION.
    If this isn't the user's first time, redirects to COURSE/CHAPTER,
and the view will find the current section and display a message
about reusing the stored position.
If there is no current position in the course or chapter, then selects
the first child.
"""
urlargs = {'course_id': course_module.descriptor.id}
chapter = get_current_child(course_module)
if chapter is None:
# oops. Something bad has happened.
raise Http404("No chapter found when loading current position in course")
urlargs['chapter'] = chapter.url_name
if course_module.position is not None:
return redirect(reverse('courseware_chapter', kwargs=urlargs))
# Relying on default of returning first child
section = get_current_child(chapter)
if section is None:
raise Http404("No section found when loading current position in course")
urlargs['section'] = section.url_name
return redirect(reverse('courseware_section', kwargs=urlargs))
def save_child_position(seq_module, child_name):
"""
child_name: url_name of the child
"""
for position, c in enumerate(seq_module.get_display_items(), start=1):
if c.url_name == child_name:
# Only save if position changed
if position != seq_module.position:
seq_module.position = position
# Save this new position to the underlying KeyValueStore
seq_module.save()
def check_for_active_timelimit_module(request, course_id, course):
"""
Looks for a timing module for the given user and course that is currently active.
If found, returns a context dict with timer-related values to enable display of time remaining.
"""
context = {}
# TODO (cpennington): Once we can query the course structure, replace this with such a query
timelimit_student_modules = StudentModule.objects.filter(student=request.user, course_id=course_id, module_type='timelimit')
if timelimit_student_modules:
for timelimit_student_module in timelimit_student_modules:
# get the corresponding section_descriptor for the given StudentModel entry:
module_state_key = timelimit_student_module.module_state_key
timelimit_descriptor = modulestore().get_instance(course_id, Location(module_state_key))
timelimit_module_cache = ModelDataCache.cache_for_descriptor_descendents(course.id, request.user,
timelimit_descriptor, depth=None)
timelimit_module = get_module_for_descriptor(request.user, request, timelimit_descriptor,
timelimit_module_cache, course.id, position=None)
if timelimit_module is not None and timelimit_module.category == 'timelimit' and \
timelimit_module.has_begun and not timelimit_module.has_ended:
location = timelimit_module.location
# determine where to go when the timer expires:
if timelimit_descriptor.time_expired_redirect_url is None:
raise Http404("no time_expired_redirect_url specified at this location: {} ".format(timelimit_module.location))
context['time_expired_redirect_url'] = timelimit_descriptor.time_expired_redirect_url
# Fetch the remaining time relative to the end time as stored in the module when it was started.
# This value should be in milliseconds.
remaining_time = timelimit_module.get_remaining_time_in_ms()
context['timer_expiration_duration'] = remaining_time
context['suppress_toplevel_navigation'] = timelimit_descriptor.suppress_toplevel_navigation
return_url = reverse('jump_to', kwargs={'course_id': course_id, 'location': location})
context['timer_navigation_return_url'] = return_url
return context
def update_timelimit_module(user, course_id, model_data_cache, timelimit_descriptor, timelimit_module):
"""
Updates the state of the provided timing module, starting it if it hasn't begun.
Returns dict with timer-related values to enable display of time remaining.
Returns 'timer_expiration_duration' in dict if timer is still active, and not if timer has expired.
"""
context = {}
# determine where to go when the exam ends:
if timelimit_descriptor.time_expired_redirect_url is None:
raise Http404("No time_expired_redirect_url specified at this location: {} ".format(timelimit_module.location))
context['time_expired_redirect_url'] = timelimit_descriptor.time_expired_redirect_url
if not timelimit_module.has_ended:
if not timelimit_module.has_begun:
# user has not started the exam, so start it now.
if timelimit_descriptor.duration is None:
raise Http404("No duration specified at this location: {} ".format(timelimit_module.location))
# The user may have an accommodation that has been granted to them.
# This accommodation information should already be stored in the module's state.
timelimit_module.begin(timelimit_descriptor.duration)
# the exam has been started, either because the student is returning to the
# exam page, or because they have just visited it. Fetch the remaining time relative to the
# end time as stored in the module when it was started.
context['timer_expiration_duration'] = timelimit_module.get_remaining_time_in_ms()
# also use the timed module to determine whether top-level navigation is visible:
context['suppress_toplevel_navigation'] = timelimit_descriptor.suppress_toplevel_navigation
return context
def chat_settings(course, user):
"""
Returns a dict containing the settings required to connect to a
Jabber chat server and room.
"""
domain = getattr(settings, "JABBER_DOMAIN", None)
if domain is None:
log.warning('You must set JABBER_DOMAIN in the settings to '
'enable the chat widget')
return None
return {
'domain': domain,
# Jabber doesn't like slashes, so replace with dashes
'room': "{ID}_class".format(ID=course.id.replace('/', '-')),
'username': "{USER}@{DOMAIN}".format(
USER=user.username, DOMAIN=domain
),
# TODO: clearly this needs to be something other than the username
# should also be something that's not necessarily tied to a
# particular course
'password': "{USER}@{DOMAIN}".format(
USER=user.username, DOMAIN=domain
),
}
@login_required
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def index(request, course_id, chapter=None, section=None,
position=None):
"""
Displays courseware accordion and associated content. If course, chapter,
and section are all specified, renders the page, or returns an error if they
are invalid.
If section is not specified, displays the accordion opened to the right chapter.
    If neither chapter nor section is specified, redirects to the user's most recent
chapter, or the first chapter if this is the user's first visit.
Arguments:
- request : HTTP request
- course_id : course id (str: ORG/course/URL_NAME)
- chapter : chapter url_name (str)
- section : section url_name (str)
- position : position in module, eg of <sequential> module (str)
Returns:
- HTTPresponse
"""
user = User.objects.prefetch_related("groups").get(id=request.user.id)
request.user = user # keep just one instance of User
course = get_course_with_access(user, course_id, 'load', depth=2)
staff_access = has_access(user, course, 'staff')
registered = registered_for_course(course, user)
if not registered:
# TODO (vshnayder): do course instructors need to be registered to see course?
log.debug('User %s tried to view course %s but is not enrolled' % (user, course.location.url()))
return redirect(reverse('about_course', args=[course.id]))
masq = setup_masquerade(request, staff_access)
try:
model_data_cache = ModelDataCache.cache_for_descriptor_descendents(
course.id, user, course, depth=2)
course_module = get_module_for_descriptor(user, request, course, model_data_cache, course.id)
if course_module is None:
log.warning('If you see this, something went wrong: if we got this'
' far, should have gotten a course module for this user')
return redirect(reverse('about_course', args=[course.id]))
if chapter is None:
return redirect_to_course_position(course_module)
context = {
'csrf': csrf(request)['csrf_token'],
'accordion': render_accordion(request, course, chapter, section, model_data_cache),
'COURSE_TITLE': course.display_name_with_default,
'course': course,
'init': '',
'content': '',
'staff_access': staff_access,
'masquerade': masq,
'xqa_server': settings.MITX_FEATURES.get('USE_XQA_SERVER', 'http://xqa:server@content-qa.mitx.mit.edu/xqa')
}
# Only show the chat if it's enabled by the course and in the
# settings.
show_chat = course.show_chat and settings.MITX_FEATURES['ENABLE_CHAT']
if show_chat:
context['chat'] = chat_settings(course, user)
# If we couldn't load the chat settings, then don't show
# the widget in the courseware.
if context['chat'] is None:
show_chat = False
context['show_chat'] = show_chat
chapter_descriptor = course.get_child_by(lambda m: m.url_name == chapter)
if chapter_descriptor is not None:
save_child_position(course_module, chapter)
else:
raise Http404('No chapter descriptor found with name {}'.format(chapter))
chapter_module = course_module.get_child_by(lambda m: m.url_name == chapter)
if chapter_module is None:
# User may be trying to access a chapter that isn't live yet
            if masq == 'student':  # if staff is masquerading as student be kinder, don't 404
log.debug('staff masq as student: no chapter %s' % chapter)
return redirect(reverse('courseware', args=[course.id]))
raise Http404
if section is not None:
section_descriptor = chapter_descriptor.get_child_by(lambda m: m.url_name == section)
if section_descriptor is None:
# Specifically asked-for section doesn't exist
                if masq == 'student':  # if staff is masquerading as student be kinder, don't 404
log.debug('staff masq as student: no section %s' % section)
return redirect(reverse('courseware', args=[course.id]))
raise Http404
# cdodge: this looks silly, but let's refetch the section_descriptor with depth=None
# which will prefetch the children more efficiently than doing a recursive load
section_descriptor = modulestore().get_instance(course.id, section_descriptor.location, depth=None)
# Load all descendants of the section, because we're going to display its
# html, which in general will need all of its children
section_model_data_cache = ModelDataCache.cache_for_descriptor_descendents(
course_id, user, section_descriptor, depth=None)
section_module = get_module(request.user, request,
section_descriptor.location,
section_model_data_cache, course_id, position, depth=None)
if section_module is None:
# User may be trying to be clever and access something
# they don't have access to.
raise Http404
# Save where we are in the chapter
save_child_position(chapter_module, section)
# check here if this section *is* a timed module.
if section_module.category == 'timelimit':
                timer_context = update_timelimit_module(user, course_id, section_model_data_cache,
                                                        section_descriptor, section_module)
if 'timer_expiration_duration' in timer_context:
context.update(timer_context)
else:
# if there is no expiration defined, then we know the timer has expired:
return HttpResponseRedirect(timer_context['time_expired_redirect_url'])
else:
# check here if this page is within a course that has an active timed module running. If so, then
# add in the appropriate timer information to the rendering context:
context.update(check_for_active_timelimit_module(request, course_id, course))
context['content'] = section_module.runtime.render(section_module, None, 'student_view').content
else:
# section is none, so display a message
prev_section = get_current_child(chapter_module)
if prev_section is None:
# Something went wrong -- perhaps this chapter has no sections visible to the user
raise Http404
prev_section_url = reverse('courseware_section', kwargs={'course_id': course_id,
'chapter': chapter_descriptor.url_name,
'section': prev_section.url_name})
context['content'] = render_to_string('courseware/welcome-back.html',
{'course': course,
'chapter_module': chapter_module,
'prev_section': prev_section,
'prev_section_url': prev_section_url})
result = render_to_response('courseware/courseware.html', context)
except Exception as e:
if isinstance(e, Http404):
# let it propagate
raise
# In production, don't want to let a 500 out for any reason
if settings.DEBUG:
raise
else:
log.exception("Error in index view: user={user}, course={course},"
" chapter={chapter} section={section}"
"position={position}".format(
user=user,
course=course,
chapter=chapter,
section=section,
position=position
))
try:
result = render_to_response('courseware/courseware-error.html',
{'staff_access': staff_access,
'course': course})
except:
                # Let the exception propagate, relying on global config to
                # at least return a nice error message
log.exception("Error while rendering courseware-error page")
raise
return result
@ensure_csrf_cookie
def jump_to_id(request, course_id, module_id):
"""
This entry point allows for a shorter version of a jump to where just the id of the element is
passed in. This assumes that id is unique within the course_id namespace
"""
course_location = CourseDescriptor.id_to_location(course_id)
items = modulestore().get_items(
['i4x', course_location.org, course_location.course, None, module_id],
course_id=course_id
)
if len(items) == 0:
raise Http404("Could not find id = {0} in course_id = {1}. Referer = {2}".
format(module_id, course_id, request.META.get("HTTP_REFERER", "")))
if len(items) > 1:
log.warning("Multiple items found with id = {0} in course_id = {1}. Referer = {2}. Using first found {3}...".
format(module_id, course_id, request.META.get("HTTP_REFERER", ""), items[0].location.url()))
return jump_to(request, course_id, items[0].location.url())
@ensure_csrf_cookie
def jump_to(request, course_id, location):
"""
Show the page that contains a specific location.
If the location is invalid or not in any class, return a 404.
Otherwise, delegates to the index view to figure out whether this user
has access, and what they should see.
"""
# Complain if the location isn't valid
try:
location = Location(location)
except InvalidLocationError:
raise Http404("Invalid location")
# Complain if there's not data for this location
try:
(course_id, chapter, section, position) = path_to_location(modulestore(), course_id, location)
except ItemNotFoundError:
raise Http404("No data at this location: {0}".format(location))
except NoPathToItem:
raise Http404("This location is not in any class: {0}".format(location))
# choose the appropriate view (and provide the necessary args) based on the
# args provided by the redirect.
# Rely on index to do all error handling and access control.
if chapter is None:
return redirect('courseware', course_id=course_id)
elif section is None:
return redirect('courseware_chapter', course_id=course_id, chapter=chapter)
elif position is None:
return redirect('courseware_section', course_id=course_id, chapter=chapter, section=section)
else:
return redirect('courseware_position', course_id=course_id, chapter=chapter, section=section, position=position)
@ensure_csrf_cookie
def course_info(request, course_id):
"""
Display the course's info.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
course = get_course_with_access(request.user, course_id, 'load')
staff_access = has_access(request.user, course, 'staff')
masq = setup_masquerade(request, staff_access) # allow staff to toggle masquerade on info page
return render_to_response('courseware/info.html', {'request': request, 'course_id': course_id, 'cache': None,
'course': course, 'staff_access': staff_access, 'masquerade': masq})
@ensure_csrf_cookie
def static_tab(request, course_id, tab_slug):
"""
Display the courses tab with the given name.
Assumes the course_id is in a valid format.
"""
course = get_course_with_access(request.user, course_id, 'load')
tab = tabs.get_static_tab_by_slug(course, tab_slug)
if tab is None:
raise Http404
contents = tabs.get_static_tab_contents(
request,
course,
tab
)
if contents is None:
raise Http404
staff_access = has_access(request.user, course, 'staff')
return render_to_response('courseware/static_tab.html',
{'course': course,
'tab': tab,
'tab_contents': contents,
'staff_access': staff_access, })
# TODO arjun: remove when custom tabs in place, see courseware/syllabus.py
@ensure_csrf_cookie
def syllabus(request, course_id):
"""
Display the course's syllabus.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
course = get_course_with_access(request.user, course_id, 'load')
staff_access = has_access(request.user, course, 'staff')
return render_to_response('courseware/syllabus.html', {'course': course,
'staff_access': staff_access, })
def registered_for_course(course, user):
"""
Return True if user is registered for course, else False
"""
if user is None:
return False
if user.is_authenticated():
return CourseEnrollment.is_enrolled(user, course.id)
else:
return False
@ensure_csrf_cookie
@cache_if_anonymous
def course_about(request, course_id):
if settings.MITX_FEATURES.get('ENABLE_MKTG_SITE', False):
raise Http404
course = get_course_with_access(request.user, course_id, 'see_exists')
registered = registered_for_course(course, request.user)
if has_access(request.user, course, 'load'):
course_target = reverse('info', args=[course.id])
else:
course_target = reverse('about_course', args=[course.id])
show_courseware_link = (has_access(request.user, course, 'load') or
settings.MITX_FEATURES.get('ENABLE_LMS_MIGRATION'))
return render_to_response('courseware/course_about.html',
{'course': course,
'registered': registered,
'course_target': course_target,
'show_courseware_link': show_courseware_link})
@ensure_csrf_cookie
@cache_if_anonymous
def mktg_course_about(request, course_id):
"""
This is the button that gets put into an iframe on the Drupal site
"""
try:
course = get_course_with_access(request.user, course_id, 'see_exists')
except (ValueError, Http404) as e:
# if a course does not exist yet, display a coming
# soon button
return render_to_response('courseware/mktg_coming_soon.html',
{'course_id': course_id})
registered = registered_for_course(course, request.user)
if has_access(request.user, course, 'load'):
course_target = reverse('info', args=[course.id])
else:
course_target = reverse('about_course', args=[course.id])
allow_registration = has_access(request.user, course, 'enroll')
show_courseware_link = (has_access(request.user, course, 'load') or
settings.MITX_FEATURES.get('ENABLE_LMS_MIGRATION'))
course_modes = CourseMode.modes_for_course(course.id)
return render_to_response('courseware/mktg_course_about.html',
{
'course': course,
'registered': registered,
'allow_registration': allow_registration,
'course_target': course_target,
'show_courseware_link': show_courseware_link,
'course_modes': course_modes,
})
def render_notifications(request, course, notifications):
context = {
'notifications': notifications,
'get_discussion_title': partial(get_discussion_title, request=request, course=course),
'course': course,
}
return render_to_string('courseware/notifications.html', context)
@login_required
def news(request, course_id):
course = get_course_with_access(request.user, course_id, 'load')
notifications = comment_client.get_notifications(request.user.id)
context = {
'course': course,
'content': render_notifications(request, course, notifications),
}
return render_to_response('courseware/news.html', context)
@login_required
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def progress(request, course_id, student_id=None):
""" User progress. We show the grade bar and every problem score.
Course staff are allowed to see the progress of students in their class.
"""
course = get_course_with_access(request.user, course_id, 'load', depth=None)
staff_access = has_access(request.user, course, 'staff')
if student_id is None or student_id == request.user.id:
# always allowed to see your own profile
student = request.user
else:
# Requesting access to a different student's profile
if not staff_access:
raise Http404
student = User.objects.get(id=int(student_id))
# NOTE: To make sure impersonation by instructor works, use
# student instead of request.user in the rest of the function.
# The pre-fetching of groups is done to make auth checks not require an
# additional DB lookup (this kills the Progress page in particular).
student = User.objects.prefetch_related("groups").get(id=student.id)
model_data_cache = ModelDataCache.cache_for_descriptor_descendents(
course_id, student, course, depth=None)
courseware_summary = grades.progress_summary(student, request, course,
model_data_cache)
grade_summary = grades.grade(student, request, course, model_data_cache)
if courseware_summary is None:
# This means the student (whose progress the instructor requested) didn't have access to the course
raise Http404
context = {'course': course,
'courseware_summary': courseware_summary,
'grade_summary': grade_summary,
'staff_access': staff_access,
'student': student,
}
context.update()
return render_to_response('courseware/progress.html', context)
@login_required
def submission_history(request, course_id, student_username, location):
"""Render an HTML fragment (meant for inclusion elsewhere) that renders a
history of all state changes made by this user for this problem location.
Right now this only works for problems because that's all that
StudentModuleHistory records.
"""
course = get_course_with_access(request.user, course_id, 'load')
staff_access = has_access(request.user, course, 'staff')
# Permission Denied if they don't have staff access and are trying to see
# somebody else's submission history.
if (student_username != request.user.username) and (not staff_access):
raise PermissionDenied
try:
student = User.objects.get(username=student_username)
student_module = StudentModule.objects.get(course_id=course_id,
module_state_key=location,
student_id=student.id)
except User.DoesNotExist:
return HttpResponse(escape("User {0} does not exist.".format(student_username)))
except StudentModule.DoesNotExist:
return HttpResponse(escape("{0} has never accessed problem {1}".format(student_username, location)))
history_entries = StudentModuleHistory.objects.filter(
student_module=student_module
).order_by('-id')
# If no history records exist, let's force a save to get history started.
if not history_entries:
student_module.save()
history_entries = StudentModuleHistory.objects.filter(
student_module=student_module
).order_by('-id')
context = {
'history_entries': history_entries,
'username': student.username,
'location': location,
'course_id': course_id
}
return render_to_response('courseware/submission_history.html', context)
|
pdehaye/theming-edx-platform
|
lms/djangoapps/courseware/views.py
|
Python
|
agpl-3.0
| 32,013
|
[
"VisIt"
] |
23a6548d3287e8c45cbb41642ee4a11c42e052c6797dba2f816ec6eeaf796273
|
#!/usr/bin/env python
import sys, os, random
from pprint import pprint, pformat
from LunchPlace import LunchPlace
from LunchVisit import LunchVisit
from LuncherPreferences import LuncherPreferences
def read_places(filename):
placenames = {}
places = []
with open(filename) as f:
for line in f:
line = line.strip()
if line.startswith('#'):
continue
fields = line.split(',')
name = fields[0]
tags = fields[1:]
if name in placenames:
continue
placenames[name] = True
place = LunchPlace(name)
for tag in tags:
place.add_tag(tag)
places.append(place)
sys.stderr.write("Loaded %s\n" % filename)
return places
def read_history(filename):
history = []
with open(filename) as f:
for line in f:
line = line.strip()
if line.startswith('#'):
continue
date, name = line.split(',')
visit = LunchVisit(name, date)
history.append(visit)
sys.stderr.write("Loaded %s\n" % filename)
return history
def read_luncher_prefs(filename):
name = os.path.basename(filename).replace(".prefs", "")
prefs = LuncherPreferences(name)
with open(filename) as f:
for line in f:
line = line.strip()
if line.startswith('#'):
continue
weight, name = line.split(',')
prefs.update(name, weight)
prefs.normalize()
sys.stderr.write("Loaded %s\n" % filename)
return prefs
def read_lunchers_prefs(filenames):
lunchers_preferences = []
for filename in filenames:
prefs = read_luncher_prefs(filename)
lunchers_preferences.append(prefs)
return lunchers_preferences
def load_files(places_filename, history_filename, luncher_prefs_filenames):
places = read_places(places_filename)
history = read_history(history_filename)
lunchers_preferences = read_lunchers_prefs(luncher_prefs_filenames)
# Check that the places in the history and user prefs are known places;
# otherwise bad things could happen.
places_dict = {}
for place in places:
places_dict[place.name] = True
for visit in history:
assert(visit.place in places_dict)
for prefs in lunchers_preferences:
for place in prefs.preferences:
assert(place in places_dict)
return (places, history, lunchers_preferences)
def pick_my_lunch(place_probs):
# got this code here http://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python/
totals = []
running_total = 0
for place,prob in place_probs.items():
running_total += prob
totals.append((place, running_total))
rnd = random.random() * running_total
for i, place_total in enumerate(totals):
place, total = place_total
if rnd < total:
return place
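# Illustrative sketch (not part of the original script): a small sanity
# check for the cumulative-total weighted pick above. The weights and the
# helper name are hypothetical; it is never called by main().
def _demo_pick_my_lunch(trials=10000):
    place_probs = {'thai': 0.5, 'pizza': 0.3, 'sushi': 0.2}
    counts = {}
    for _ in range(trials):
        choice = pick_my_lunch(place_probs)
        counts[choice] = counts.get(choice, 0) + 1
    # Counts should roughly match the weights: ~5000 / ~3000 / ~2000.
    return counts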
def main(args):
if len(args[1:]) < 3:
sys.stderr.write("usage: %s <places> <history> <user-preferences>+\n" % args[0])
sys.stderr.write("You must have 1 or more user preferences!\n")
return 1
places_filename = args[1]
history_filename = args[2]
luncher_prefs_filenames = args[3:]
places, history, lunchers_prefs = load_files(places_filename, history_filename, luncher_prefs_filenames)
total_preference_weight = 0
for luncher_pref in lunchers_prefs:
total_preference_weight += sum(luncher_pref.preferences.values())
# Pick a place based on history and preferences:
#   assign each place a probability
#   choose from the distribution
probabilities = {}
for place in places:
place_total_weight = 0
for luncher_prefs in lunchers_prefs:
if place.name in luncher_prefs.preferences:
place_total_weight += luncher_prefs.preferences[place.name]
probabilities[place.name] = place_total_weight / total_preference_weight
most_recent_visit = None
for visit in history:
if visit.place != place.name: continue
if most_recent_visit is None or most_recent_visit.date < visit.date:
most_recent_visit = visit
if most_recent_visit is not None:
probabilities[place.name] *= most_recent_visit.discount
# normalize to percentages for display (pick_my_lunch copes with unnormalized weights anyway)
total_unnormalized_probs = sum(probabilities.values())
for place in probabilities:
probabilities[place] = probabilities[place] * 100 / total_unnormalized_probs
sys.stderr.write("\nProbabilities:\n")
probabilities_list = sorted([(round(val, 2), key) for key,val in probabilities.iteritems()], reverse=True)
sys.stderr.write(pformat(probabilities_list) + "\n\n")
# time to pick lunch!
selection = pick_my_lunch(probabilities)
print "==> " + selection
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
ryanaustincarlson/markovlunch
|
pick_lunch.py
|
Python
|
mit
| 5,036
|
[
"VisIt"
] |
5ace1a27c81b63800f6799bf68198752c2409e2180315ce3bcc92e42348af27c
|
#
# Copyright (C) 2007, Mark Lee
#
# http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Revision: 446 $
# $Date: 2009-01-22 22:20:21 -0500 (Thu, 22 Jan 2009) $
# $Author: brian@tannerpages.com $
# $HeadURL: http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Python/src/rlglue/agent/Agent.py $
class Agent(object):
# (string) -> void
def init(self, task_spec):
pass
# () -> void
def setup(self):
pass
# (Observation) -> Action
def start(self, observation):
pass
# (double, Observation) -> Action
def step(self, reward, observation):
pass
# (double) -> int
def end(self, reward):
pass
# () -> void
def cleanup(self):
pass
# (string) -> string
def message(self, msg):
pass
|
evenmarbles/rlglued
|
rlglued/agent/agent.py
|
Python
|
bsd-3-clause
| 1,340
|
[
"Brian"
] |
32b17976adc4cbeb6ddf5781f7f12298d840228984850f718517175a2d7cb866
|
"""
MRS.api
-------
Functions and classes for representation and analysis of MRS data. This is the
main module to use when performing routine analysis of MRS data.
"""
import numpy as np
import scipy.stats as stats
import nibabel as nib
import warnings
import MRS.analysis as ana
import MRS.utils as ut
import MRS.freesurfer as fs
class GABA(object):
"""
Class for analysis of GABA MRS.
"""
def __init__(self, in_data, line_broadening=5, zerofill=100,
filt_method=None, min_ppm=-0.7, max_ppm=4.3):
"""
Parameters
----------
in_data : str
Path to a nifti file containing MRS data.
line_broadening : float
How much to broaden the spectral line-widths (Hz)
zerofill : int
How many zeros to add to the spectrum for additional spectral
resolution
min_ppm, max_ppm : float
The limits of the spectra that are represented
fit_lb, fit_ub : float
The limits for the part of the spectrum for which we fit the
creatine and GABA peaks.
"""
if isinstance(in_data, str):
# The nifti files follow the strange nifti convention, but we want
# to use our own logic, which is transients on dim 0 and time on
# dim -1:
self.raw_data = np.transpose(nib.load(in_data).get_data(),
[1,2,3,4,5,0]).squeeze()
elif isinstance(in_data, np.ndarray):
self.raw_data = in_data
w_data, w_supp_data = ana.coil_combine(self.raw_data)
f_hz, w_supp_spectra = ana.get_spectra(w_supp_data,
line_broadening=line_broadening,
zerofill=zerofill,
filt_method=filt_method)
self.w_supp_spectra = w_supp_spectra
# Often, there will be some small offset from the on-resonance
# frequency, which we can correct for. We fit a Lorentzian to each of
# the spectra from the water-suppressed data, so that we can get a
# phase-corrected estimate of the frequency shift, instead of just
# relying on the frequency of the maximum:
self.w_supp_lorentz = np.zeros(w_supp_spectra.shape[:-1] + (6,))
for ii in range(self.w_supp_lorentz.shape[0]):
for jj in range(self.w_supp_lorentz.shape[1]):
self.w_supp_lorentz[ii,jj]=\
ana._do_lorentzian_fit(f_hz, w_supp_spectra[ii,jj])
# We store the frequency offset for each transient/echo:
self.freq_offset = self.w_supp_lorentz[..., 0]
# But for now, we average over all the transients/echos for the
# correction:
mean_freq_offset = np.mean(self.w_supp_lorentz[..., 0])
f_hz = f_hz - mean_freq_offset
self.water_fid = w_data
self.w_supp_fid = w_supp_data
# This is the time-domain signal of interest, combined over coils:
self.data = ana.subtract_water(w_data, w_supp_data)
_, spectra = ana.get_spectra(self.data,
line_broadening=line_broadening,
zerofill=zerofill,
filt_method=filt_method)
self.f_hz = f_hz
# Convert from Hz to ppm and extract the part you are interested in.
f_ppm = ut.freq_to_ppm(self.f_hz)
idx0 = np.argmin(np.abs(f_ppm - min_ppm))
idx1 = np.argmin(np.abs(f_ppm - max_ppm))
self.idx = slice(idx1, idx0)
self.f_ppm = f_ppm
self.echo_off = spectra[:, 1]
self.echo_on = spectra[:, 0]
# Calculate sum and difference:
self.diff_spectra = self.echo_on - self.echo_off
self.sum_spectra = self.echo_off + self.echo_on
def reset_fits(self):
"""
This is used to restore the original state of the fits.
"""
for attr in ['creatine_params', 'creatine_model', 'creatine_signal',
'cr_idx', 'creatine_auc', 'gaba_params', 'gaba_model',
'gaba_signal', 'gaba_idx', 'gaba_auc', 'glx_params',
'glx_model', 'glx_signal', 'glx_idx', 'glx_auc' ]:
if hasattr(self, attr):
self.__delattr__(attr)
def fit_water(self, line_broadening=5, zerofill=100,
filt_method=None, min_ppm=-5.0, max_ppm=5.0):
"""
"""
# Get the water spectrum as well:
f_hz, w_spectra = ana.get_spectra(self.water_fid,
line_broadening=line_broadening,
zerofill=zerofill,
filt_method=filt_method)
f_ppm = ut.freq_to_ppm(f_hz)
# Averaging across echos:
self.water_spectra = np.mean(w_spectra, 1)
model, signal, params = ana.fit_lorentzian(self.water_spectra,
self.f_ppm,
lb=min_ppm,
ub=max_ppm)
# Store the params:
self.water_model = model
self.water_signal = signal
self.water_params = params
self.water_idx = ut.make_idx(self.f_ppm, min_ppm, max_ppm)
mean_params = stats.nanmean(params, 0)
self.water_auc = self._calc_auc(ut.lorentzian, params, self.water_idx)
def _calc_auc(self, model, params, idx):
"""
Helper function to calculate the area under the curve of a model
Parameters
----------
model : callable
Probably either ut.lorentzian or ut.gaussian, but any function will
do, as long as its first parameter is an array of frequencies and
the third parameter controls its amplitude.
params : ndarray
Each row of these should contain exactly the number of params that
the model function expects after the first (frequency)
parameter. The second column should control the amplitude of the
function.
idx :
Indices to the part of the spectrum over which AUC will be
calculated.
"""
# Here's what we are going to do: For each transient, we generate
# the spectrum for two distinct sets of parameters: one is exactly as
# fit to the data, the other is the same except with amplitude set to
# 0. To calculate AUC, we take the difference between them:
auc = np.zeros(params.shape[0])
delta_f = np.abs(self.f_ppm[1]-self.f_ppm[0])
p = np.copy(params)
for t in range(auc.shape[0]):
model1 = model(self.f_ppm[idx], *p[t])
# This controls the amplitude in both the Gaussian and the
# Lorentzian:
p[t, 1] = 0
model0 = model(self.f_ppm[idx], *p[t])
auc[t] = np.sum((model1 - model0) * delta_f)
return auc
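# Illustrative sketch (not part of the original class): the subtraction
# trick above isolates the peak's area because offset/baseline terms are
# identical in both evaluations and cancel. A standalone toy version with
# a hypothetical model:
#
#     import numpy as np
#     def toy_model(f, freq0, amp, offset):
#         return amp * np.exp(-(f - freq0) ** 2) + offset
#     f = np.linspace(-5, 5, 1001)
#     delta_f = f[1] - f[0]
#     auc = np.sum((toy_model(f, 0.0, 2.0, 1.0) -
#                   toy_model(f, 0.0, 0.0, 1.0)) * delta_f)
#     # auc ~= 2 * sqrt(pi) ~= 3.54, the integral of the Gaussian term;
#     # the offset of 1.0 contributes nothing to the difference.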
def _outlier_rejection(self, params, model, signal, ii):
"""
Helper function to reject outliers
DRY!
"""
# Z score across repetitions:
z_score = (params - np.mean(params, 0))/np.std(params, 0)
# Silence warnings:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
outlier_idx = np.where(np.abs(z_score)>3.0)[0]
nan_idx = np.where(np.isnan(params))[0]
outlier_idx = np.unique(np.hstack([nan_idx, outlier_idx]))
ii[outlier_idx] = 0
model[outlier_idx] = np.nan
signal[outlier_idx] = np.nan
params[outlier_idx] = np.nan
return model, signal, params, ii
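# Illustrative sketch (not part of the original class): the rejection above
# z-scores each parameter across transients and NaNs out any transient with
# |z| > 3. A one-dimensional toy version with hypothetical values:
#
#     import numpy as np
#     params = np.array([1.0] * 19 + [9.0])     # last transient is aberrant
#     z = (params - np.mean(params)) / np.std(params)
#     reject = np.abs(z) > 3.0
#     # reject is True only for the last entry (its |z| is ~4.4).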
def fit_creatine(self, reject_outliers=3.0, fit_lb=2.7, fit_ub=3.5):
"""
Fit a model to the portion of the summed spectra containing the
creatine and choline signals.
Parameters
----------
reject_outliers : float or bool
If set to a float, this is the z score threshold for rejection (on
any of the parameters). If set to False, no outlier rejection
fit_lb, fit_ub : float
What part of the spectrum (in ppm) contains the creatine peak.
Default (2.7, 3.5)
Note
----
We use upper and lower bounds that are a variation on the bounds
mentioned on the GANNET ISMRM2013 poster [1]_.
[1] RAE Edden et al (2013). Gannet GABA analysis toolkit. ISMRM
conference poster.
"""
# We fit a two-lorentz function to this entire chunk of the spectrum,
# to catch both choline and creatine
model, signal, params = ana.fit_two_lorentzian(self.sum_spectra,
self.f_ppm,
lb=fit_lb,
ub=fit_ub)
# Use an array of ones to index everything but the outliers and nans:
ii = np.ones(signal.shape[0], dtype=bool)
# Reject outliers:
if reject_outliers:
model, signal, params, ii = self._outlier_rejection(params,
model,
signal,
ii)
# We'll keep around a private attribute to tell us which transients
# were good (this is for both creatine and choline):
self._cr_transients = np.where(ii)
# Now we separate choline and creatine params from each other (remember
# that they both share offset and drift!):
self.choline_params = params[:, (0,2,4,6,8,9)]
self.creatine_params = params[:, (1,3,5,7,8,9)]
self.cr_idx = ut.make_idx(self.f_ppm, fit_lb, fit_ub)
# We'll need to generate the model predictions from these parameters,
# because what we're holding in 'model' is for both together:
self.choline_model = np.zeros((self.creatine_params.shape[0],
np.abs(self.cr_idx.stop-self.cr_idx.start)))
self.creatine_model = np.zeros((self.choline_params.shape[0],
np.abs(self.cr_idx.stop-self.cr_idx.start)))
for idx in range(self.creatine_params.shape[0]):
self.creatine_model[idx] = ut.lorentzian(self.f_ppm[self.cr_idx],
*self.creatine_params[idx])
self.choline_model[idx] = ut.lorentzian(self.f_ppm[self.cr_idx],
*self.choline_params[idx])
self.creatine_signal = signal
self.creatine_auc = self._calc_auc(ut.lorentzian,
self.creatine_params,
self.cr_idx)
self.choline_auc = self._calc_auc(ut.lorentzian,
self.choline_params,
self.cr_idx)
def _fit_helper(self, fit_spectra, reject_outliers, fit_lb, fit_ub,
fit_func):
"""
This is a helper function for fitting different segments of the spectrum
with Gaussian functions (GLX and GABA).
Parameters
----------
fit_spectra : ndarray
The data to fit
reject_outliers : float or bool
Z score for outlier rejection. If set to `False`, no outlier
rejection is performed.
fit_lb : float
The lower bound of the part of the ppm scale for which the Gaussian
is fit.
fit_ub : float
The upper bound of the part of the scale fit.
fit_func: callable
e.g. `fit_gaussian`
Returns
-------
choose_transients : tuple
Indices into the original data's transients dimension to select
non-outlier transients. If reject_outliers is set to `False`, this is
all the transients
model : ndarray
The model prediction in each transient, based on the fit.
signal : ndarray
The original signal in this part of the difference spectrum.
params : ndarray
The Gaussian parameters in each transient as fit.
this_idx : slice object
A slice into the part of the spectrum that is fit
"""
# fit_idx should already be set from fitting the creatine params:
model, signal, params = fit_func(fit_spectra,
self.f_ppm,
lb=fit_lb,
ub=fit_ub)
# We'll use these indices to reject outliers (or not):
ii = np.ones(signal.shape[0], dtype=bool)
# Reject outliers:
if reject_outliers:
model, signal, params, ii = self._outlier_rejection(params,
model,
signal,
ii)
choose_transients = np.where(ii)
this_idx = ut.make_idx(self.f_ppm, fit_lb, fit_ub)
return choose_transients, model, signal, params, this_idx
def _xval_choose_funcs(self, fit_spectra, reject_outliers, fit_lb, fit_ub,
fitters=[ana.fit_gaussian,ana.fit_two_gaussian],
funcs = [ut.gaussian, ut.two_gaussian]):
""" Helper function used to do split-half xvalidation to select among
alternative models"""
set1 = fit_spectra[::2]
set2 = fit_spectra[1::2]
errs = []
signal_select = []
# We can loop over functions and try each one out, checking the
# error in each:
for fitter in fitters:
models = []
signals = []
for this_set in [set1, set2]:
choose_transients, model, signal, params, this_idx =\
self._fit_helper(this_set, reject_outliers,
fit_lb, fit_ub, fitter)
models.append(np.nanmean(model[choose_transients], 0))
signals.append(np.nanmean(signal[choose_transients], 0))
signal_select.append(signal[choose_transients])
#Cross-validate!
errs.append(np.mean([ut.rmse(models[0], signals[1]),
ut.rmse(models[1], signals[0])]))
# We really only need to look at the first two:
signal_err = ut.rmse(np.nanmean(signal_select[0], 0),
np.nanmean(signal_select[1], 0))
# Based on the errors, choose a function. Also report errors:
return (fitters[np.argmin(errs)], funcs[np.argmin(errs)], np.min(errs),
signal_err)
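# Illustrative sketch (not part of the original class) of the split-half
# selection above: fit every candidate on the even and odd transients
# separately, score each fit against the mean signal of the *other* half,
# and keep the candidate with the lowest average cross-half RMSE.
# `candidates`, `fit_and_mean` and `rmse` are hypothetical names:
#
#     set1, set2 = spectra[::2], spectra[1::2]
#     errs = []
#     for fitter in candidates:
#         model1, signal1 = fit_and_mean(fitter, set1)
#         model2, signal2 = fit_and_mean(fitter, set2)
#         errs.append((rmse(model1, signal2) + rmse(model2, signal1)) / 2.0)
#     best = candidates[np.argmin(errs)]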
def _xval_model_error(self, fit_spectra, reject_outliers, fit_lb, fit_ub,
fitter, func):
"""
Helper function for calculation of split-half cross-validation model
error and signal reliability.
"""
set1 = fit_spectra[::2]
set2 = fit_spectra[1::2]
errs = []
signal_select = []
models = []
signals = []
for this_set in [set1, set2]:
choose_transients, model, signal, params, this_idx =\
self._fit_helper(this_set, reject_outliers,
fit_lb, fit_ub, fitter)
models.append(np.nanmean(model[choose_transients], 0))
signals.append(np.nanmean(signal[choose_transients], 0))
signal_select.append(signal[choose_transients])
#Cross-validation error estimation:
model_err = np.mean([ut.rmse(models[0], signals[1]),
ut.rmse(models[1], signals[0])])
# Also for the signal:
signal_err = ut.rmse(np.nanmean(signal_select[0], 0),
np.nanmean(signal_select[1], 0))
# Report the cross-validation model error and the signal reliability:
return model_err, signal_err
def fit_gaba(self, reject_outliers=3.0, fit_lb=2.8, fit_ub=3.4,
phase_correct=True, fit_func=None):
"""
Fit either a single Gaussian, or a two-Gaussian to the GABA 3 PPM
peak.
Parameters
----------
reject_outliers : float
Z-score criterion for rejection of outliers, based on their model
parameter
fit_lb, fit_ub : float
Frequency bounds (in ppm) for the region of the spectrum to be
fit.
phase_correct : bool
Whether to perform zero-order phase correction based on the fit of
the creatine peaks in the sum spectra
fit_func : None or tuple of callables (default None)
If this is set to `None`, an automatic selection will take place,
choosing between a two-Gaussian and a single Gaussian, based on a
split-half cross-validation procedure. Otherwise, supply a (fitting
function, model function) pair, conforming to the conventions of
`fit_gaussian`/`fit_two_gaussian` and `ut.gaussian`/`ut.two_gaussian`.
"""
# We need to fit the creatine, so that we know which transients to
# exclude in fitting this peak:
if not hasattr(self, 'creatine_params'):
self.fit_creatine()
fit_spectra = np.ones(self.diff_spectra.shape) * np.nan
# Silence warnings:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fit_spectra =\
self.diff_spectra[self._cr_transients].copy()
if phase_correct:
for ii, this_spec in enumerate(fit_spectra):
# Silence warnings:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fit_spectra[ii] = ut.phase_correct_zero(this_spec,
self.creatine_params[self._cr_transients][ii, 3])
if fit_func is None:
# Cross-validate!
fitter, self.gaba_func, self.gaba_model_err, self.gaba_signal_err=\
self._xval_choose_funcs(fit_spectra,
reject_outliers,
fit_lb, fit_ub)
# Otherwise, you had better supply a couple of callables that can be
# used to fit these spectra!
else:
fitter = fit_func[0]
self.gaba_func = fit_func[1]
self.gaba_model_err, self.gaba_signal_err = \
self._xval_model_error(fit_spectra, reject_outliers,
fit_lb, fit_ub, fitter, self.gaba_func)
# Either way, we end up fitting to everything in the end:
choose_transients, model, signal, params, this_idx = self._fit_helper(
fit_spectra, reject_outliers,
fit_lb, fit_ub, fitter)
self._gaba_transients = choose_transients
self.gaba_model = model
self.gaba_signal = signal
self.gaba_params = params
self.gaba_idx = this_idx
mean_params = stats.nanmean(params, 0)
self.gaba_auc = self._calc_auc(self.gaba_func, params, self.gaba_idx)
def fit_glx(self, reject_outliers=3.0, fit_lb=3.6, fit_ub=3.9,
fit_func=None):
"""
Fit a Gaussian function to the Glu/Gln (GLX) peak at 3.75ppm, +/-
0.15ppm [Hurd2004]_. Compare this model to a model that treats the GLX
signal as two Gaussian peaks, and select between them based on
split-half cross-validation.
Parameters
----------
reject_outliers : float or bool
If set to a float, this is the z score threshold for rejection (on
any of the parameters). If set to False, no outlier rejection
fit_lb, fit_ub : float
What part of the spectrum (in ppm) contains the GLX peak.
Default (3.6, 3.9)
fit_func : None or tuple of callables (default None)
If None, the fitting and model functions are chosen automatically by
split-half cross-validation; otherwise supply a (fitting function,
model function) pair, as for `fit_gaba`.
References
----------
.. [Hurd2004] 2004, Measurement of brain glutamate using TE-averaged
PRESS at 3T
"""
# Use everything:
fit_spectra = self.diff_spectra.copy()
if fit_func is None:
# Cross-validate!
fitter, self.glx_func, self.glx_model_err, self.glx_signal_err=\
self._xval_choose_funcs(fit_spectra,
reject_outliers,
fit_lb, fit_ub)
# Otherwise, you had better supply a couple of callables that can be
# used to fit these spectra!
else:
fitter = fit_func[0]
self.glx_func = fit_func[1]
self.glx_model_err, self.glx_signal_err = \
self._xval_model_error(fit_spectra, reject_outliers,
fit_lb, fit_ub, fitter, self.glx_func)
# Do it!
choose_transients, model, signal, params, this_idx = self._fit_helper(
fit_spectra, reject_outliers,
fit_lb, fit_ub, fitter)
self._glx_transients = choose_transients
self.glx_model = model
self.glx_signal = signal
self.glx_params = params
self.glx_idx = this_idx
mean_params = stats.nanmean(params, 0)
self.glx_auc = self._calc_auc(self.glx_func, params, self.glx_idx)
def fit_naa(self, reject_outliers=3.0, fit_lb=1.8, fit_ub=2.4,
phase_correct=True):
"""
Fit a Lorentzian function to the NAA peak at ~ 2 ppm. An example of
fitting an inverted peak: Foerster et al. 2013, An imbalance between
excitatory and inhibitory neurotransmitters in amyotrophic lateral
sclerosis revealed by use of 3T proton MRS
"""
model, signal, params = ana.fit_lorentzian(self.diff_spectra,
self.f_ppm,
lb=fit_lb,
ub=fit_ub)
# Store the params:
self.naa_model = model
self.naa_signal = signal
self.naa_params = params
self.naa_idx = ut.make_idx(self.f_ppm, fit_lb, fit_ub)
mean_params = stats.nanmean(params, 0)
self.naa_auc = self._calc_auc(ut.lorentzian, params, self.naa_idx)
def fit_glx2(self, reject_outliers=3.0, fit_lb=3.6, fit_ub=3.9,
phase_correct=True, scalefit=False):
"""
Parameters
----------
reject_outliers : float or bool
If set to a float, this is the z score threshold for rejection (on
any of the parameters). If set to False, no outlier rejection
fit_lb, fit_ub : float
What part of the spectrum (in ppm) contains the GLX peaks.
Default (3.6, 3.9)
scalefit : boolean
If this is set to true, attempt is made to tighten the fit to the
peak with a second round of fitting where the fitted curve
is fit with a scale factor. (default false)
"""
if not hasattr(self, 'creatine_params'):
self.fit_creatine()
fit_spectra = self.diff_spectra
# We fit a two-gaussian function to this entire chunk of the spectrum,
# to catch both glx peaks
model, signal, params = ana.fit_two_gaussian(fit_spectra,
self.f_ppm,
lb=fit_lb,
ub=fit_ub)
# Use an array of ones to index everything but the outliers and nans:
ii = np.ones(signal.shape[0], dtype=bool)
# Reject outliers:
if reject_outliers:
model, signal, params, ii = self._outlier_rejection(params,
model,
signal,
ii)
# We'll keep around a private attribute to tell us which transients
# were good:
self._glx2_transients = np.where(ii)
# Now we separate params of the two glx peaks from each other
# (remember that they both share offset and drift!):
self.glxp1_params = params[:, (0, 2, 4, 6, 7)]
self.glxp2_params = params[:, (1, 3, 5, 6, 7)]
self.glx2_idx = ut.make_idx(self.f_ppm, fit_lb, fit_ub)
# We'll need to generate the model predictions from these parameters,
# because what we're holding in 'model' is for both together:
self.glxp1_model = np.zeros((self.glxp1_params.shape[0],
np.abs(self.glx2_idx.stop-self.glx2_idx.start)))
self.glxp2_model = np.zeros((self.glxp2_params.shape[0],
np.abs(self.glx2_idx.stop-self.glx2_idx.start)))
for idx in range(self.glxp2_params.shape[0]):
self.glxp2_model[idx] = ut.gaussian(self.f_ppm[self.glx2_idx],
*self.glxp2_params[idx])
self.glxp1_model[idx] = ut.gaussian(self.f_ppm[self.glx2_idx],
*self.glxp1_params[idx])
if scalefit:
combinedmodel = self.glxp2_model + self.glxp1_model
scalefac, scalemodel = ana._do_scale_fit(
self.f_ppm[self.glx2_idx], signal,combinedmodel)
# Reject outliers:
scalemodel, signal, params, ii = self._rm_outlier_by_amp(params,
scalemodel,
signal,
ii)
self.glx2_model = scalemodel
else:
self.glx2_model = self.glxp1_model + self.glxp2_model
self.glx2_signal = signal
self.glx2_auc = (
self._calc_auc(ut.gaussian, self.glxp2_params, self.glx2_idx) +
self._calc_auc(ut.gaussian, self.glxp1_params, self.glx2_idx))
def _rm_outlier_by_amp(self, params, model, signal, ii):
"""
Helper function to reject outliers based on mean amplitude
"""
# # mean amplitudes per transient
# meanamps = np.mean(model,1)
# max amplitudes
maxamps = np.nanmax(np.abs(model),0)
# zscore
# z_score = (meanamps - np.nanmean(meanamps,0))/np.nanstd(meanamps,0)
z_score = (maxamps - np.nanmean(maxamps,0))/np.nanstd(maxamps,0)
print z_score
with warnings.catch_warnings():
warnings.simplefilter("ignore")
outlier_idx = np.where(np.abs(z_score)>2.0)[0]
nan_idx = np.where(np.isnan(params))[0]
outlier_idx = np.unique(np.hstack([nan_idx, outlier_idx]))
ii[outlier_idx] = 0
print sum(ii)
model[outlier_idx] = np.nan
signal[outlier_idx] = np.nan
params[outlier_idx] = np.nan
return model, signal, params, ii
def est_gaba_conc(self):
"""
Estimate gaba concentration based on equation adapted from Sanacora
1999, p1045
Ref: Sanacora, G., Mason, G. F., Rothman, D. L., Behar, K. L., Hyder,
F., Petroff, O. A., ... & Krystal, J. H. (1999). Reduced cortical
$\gamma$-aminobutyric acid levels in depressed patients determined by
proton magnetic resonance spectroscopy. Archives of general psychiatry,
56(11), 1043.
"""
# need gaba_auc and creatine_auc
if not hasattr(self, 'gaba_params'):
self.fit_gaba()
# estimate [GABA] according to equation 9
gaba_conc_est = self.gaba_auc / self.creatine_auc * 1.5 * 9.0
self.gaba_conc_est = gaba_conc_est
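# Worked example (hypothetical numbers, not from any dataset): with
# gaba_auc = 0.02 and creatine_auc = 0.30, the estimate above is
# 0.02 / 0.30 * 1.5 * 9.0 = 0.9, in the units implied by the Sanacora
# 1999 scaling constants.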
def voxel_seg(self, segfile, MRSfile):
"""
add voxel segmentation info
Parameters
----------
segfile : str
Path to nifti file with segmentation info (e.g. XXXX_aseg.nii.gz)
MRSfile : str
Path to MRS nifti file
"""
total, grey, white, csf, nongmwm, pGrey, pWhite, pCSF, pNongmwm =\
fs.MRSvoxelStats(segfile, MRSfile)
self.pGrey = pGrey
self.pWhite = pWhite
self.pCSF = pCSF
self.pNongmwm = pNongmwm
class SingleVoxel(object):
"""
Class for representation and analysis of single-voxel (SV-PROBE)
experiments.
"""
def __init__(self, in_file, line_broadening=5, zerofill=100,
filt_method=None, min_ppm=-0.7, max_ppm=4.3):
"""
Parameters
----------
in_file : str
Path to a nifti file with SV-PROBE MRS data.
line_broadening : float
How much to broaden the spectral line-widths (Hz)
zerofill : int
How many zeros to add to the spectrum for additional spectral
resolution
min_ppm, max_ppm : float
The limits of the spectra that are represented
fit_lb, fit_ub : float
The limits for the part of the spectrum for which we fit the
creatine and GABA peaks.
"""
self.raw_data = np.transpose(nib.load(in_file).get_data(),
[1,2,3,4,5,0]).squeeze()
w_data, w_supp_data = ana.coil_combine(self.raw_data, w_idx = range(8),
coil_dim=1)
# We keep these around for reference, as private attrs
self._water_data = w_data
self._w_supp_data = w_supp_data
# This is the time-domain signal of interest, combined over coils:
self.data = ana.subtract_water(w_data, w_supp_data)
f_hz, spectra = ana.get_spectra(self.data,
line_broadening=line_broadening,
zerofill=zerofill,
filt_method=filt_method)
self.f_hz = f_hz
# Convert from Hz to ppm and extract the part you are interested in.
f_ppm = ut.freq_to_ppm(self.f_hz)
idx0 = np.argmin(np.abs(f_ppm - min_ppm))
idx1 = np.argmin(np.abs(f_ppm - max_ppm))
self.idx = slice(idx1, idx0)
self.f_ppm = f_ppm
self.spectra = spectra[:,self.idx]
|
arokem/MRS-old
|
MRS/api.py
|
Python
|
mit
| 31,407
|
[
"Gaussian"
] |
682188465ba0a10cd6a59baa0823282b273c61d21d61d55041a9bc0206b65958
|
import ast
import datetime
import re
import secrets
import time
from collections import defaultdict
from datetime import timedelta
from typing import (
AbstractSet,
Any,
Callable,
DefaultDict,
Dict,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
Union,
)
import django.contrib.auth
from bitfield import BitField
from bitfield.types import BitHandler
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ValidationError
from django.core.validators import MinLengthValidator, RegexValidator, URLValidator, validate_email
from django.db import models, transaction
from django.db.models import CASCADE, Manager, Q, Sum
from django.db.models.query import QuerySet
from django.db.models.signals import post_delete, post_save
from django.utils.functional import Promise
from django.utils.timezone import now as timezone_now
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from confirmation import settings as confirmation_settings
from zerver.lib import cache
from zerver.lib.cache import (
active_non_guest_user_ids_cache_key,
active_user_ids_cache_key,
bot_dict_fields,
bot_dicts_in_realm_cache_key,
bot_profile_cache_key,
bulk_cached_fetch,
cache_delete,
cache_set,
cache_with_key,
flush_message,
flush_realm,
flush_stream,
flush_submessage,
flush_used_upload_space_cache,
flush_user_profile,
get_realm_used_upload_space_cache_key,
get_stream_cache_key,
realm_alert_words_automaton_cache_key,
realm_alert_words_cache_key,
realm_user_dict_fields,
realm_user_dicts_cache_key,
user_profile_by_api_key_cache_key,
user_profile_by_email_cache_key,
user_profile_by_id_cache_key,
user_profile_cache_key,
)
from zerver.lib.exceptions import JsonableError
from zerver.lib.pysa import mark_sanitized
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.types import (
DisplayRecipientT,
ExtendedFieldElement,
ExtendedValidator,
FieldElement,
ProfileData,
ProfileDataElementBase,
RealmUserValidator,
UserFieldElement,
Validator,
)
from zerver.lib.utils import make_safe_digest
from zerver.lib.validator import (
check_date,
check_int,
check_list,
check_long_string,
check_short_string,
check_url,
validate_choice_field,
)
MAX_TOPIC_NAME_LENGTH = 60
MAX_MESSAGE_LENGTH = 10000
MAX_LANGUAGE_ID_LENGTH: int = 50
STREAM_NAMES = TypeVar('STREAM_NAMES', Sequence[str], AbstractSet[str])
def query_for_ids(query: QuerySet, user_ids: List[int], field: str) -> QuerySet:
'''
This function optimizes searches of the form
`user_profile_id in (1, 2, 3, 4)` by quickly
building the where clauses. Profiling shows significant
speedups over the normal Django-based approach.
Use this very carefully! Also, the caller should
guard against empty lists of user_ids.
'''
assert(user_ids)
clause = f'{field} IN %s'
query = query.extra(
where=[clause], params=(tuple(user_ids),),
)
return query
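# Illustrative usage (hypothetical call site, not from this module):
#
#     query = UserProfile.objects.filter(realm=realm)
#     query = query_for_ids(query, user_ids, 'zerver_userprofile.id')
#
# The caller must guarantee user_ids is non-empty: an empty tuple would
# render as `... IN ()`, which is invalid SQL.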
# Doing 1000 remote cache requests to get_display_recipient is quite slow,
# so add a local cache as well as the remote cache.
#
# This local cache has a lifetime of just a single request; it is
# cleared inside `flush_per_request_caches` in our middleware. It
# could be replaced with smarter bulk-fetching logic that deduplicates
# queries for the same recipient; this is just a convenient way to
# write that code.
per_request_display_recipient_cache: Dict[int, DisplayRecipientT] = {}
def get_display_recipient_by_id(recipient_id: int, recipient_type: int,
recipient_type_id: Optional[int]) -> DisplayRecipientT:
"""
returns: an object describing the recipient (using a cache).
If the type is a stream, the type_id must be an int; a string is returned.
Otherwise, type_id may be None; an array of recipient dicts is returned.
"""
# Have to import here, to avoid circular dependency.
from zerver.lib.display_recipient import get_display_recipient_remote_cache
if recipient_id not in per_request_display_recipient_cache:
result = get_display_recipient_remote_cache(recipient_id, recipient_type, recipient_type_id)
per_request_display_recipient_cache[recipient_id] = result
return per_request_display_recipient_cache[recipient_id]
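# Illustrative sketch (not part of this module) of the per-request
# memoization pattern above, reduced to its core; `expensive_fetch` is a
# hypothetical stand-in for the remote cache lookup:
#
#     _cache = {}
#     def lookup(key):
#         if key not in _cache:
#             _cache[key] = expensive_fetch(key)
#         return _cache[key]
#
# Middleware clears the real cache between requests, so entries never
# outlive a single request.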
def get_display_recipient(recipient: 'Recipient') -> DisplayRecipientT:
return get_display_recipient_by_id(
recipient.id,
recipient.type,
recipient.type_id,
)
def get_realm_emoji_cache_key(realm: 'Realm') -> str:
return f'realm_emoji:{realm.id}'
def get_active_realm_emoji_cache_key(realm: 'Realm') -> str:
return f'active_realm_emoji:{realm.id}'
# This simple call-once caching saves ~500us in auth_enabled_helper,
# which is a significant optimization for common_context. Note that
# these values cannot change in a running production system, but do
# regularly change within unit tests; we address the latter by calling
# clear_supported_auth_backends_cache in our standard tearDown code.
supported_backends: Optional[Set[type]] = None
def supported_auth_backends() -> Set[type]:
global supported_backends
# Caching temporarily disabled for debugging
supported_backends = django.contrib.auth.get_backends()
assert supported_backends is not None
return supported_backends
def clear_supported_auth_backends_cache() -> None:
global supported_backends
supported_backends = None
class Realm(models.Model):
MAX_REALM_NAME_LENGTH = 40
MAX_REALM_SUBDOMAIN_LENGTH = 40
MAX_REALM_REDIRECT_URL_LENGTH = 128
INVITES_STANDARD_REALM_DAILY_MAX = 3000
MESSAGE_VISIBILITY_LIMITED = 10000
AUTHENTICATION_FLAGS = ['Google', 'Email', 'GitHub', 'LDAP', 'Dev',
'RemoteUser', 'AzureAD', 'SAML', 'GitLab', 'Apple']
SUBDOMAIN_FOR_ROOT_DOMAIN = ''
WILDCARD_MENTION_THRESHOLD = 15
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
# User-visible display name and description used on e.g. the organization homepage
name: Optional[str] = models.CharField(max_length=MAX_REALM_NAME_LENGTH, null=True)
description: str = models.TextField(default="")
# A short, identifier-like name for the organization. Used in subdomains;
# e.g. on a server at example.com, an org with string_id `foo` is reached
# at `foo.example.com`.
string_id: str = models.CharField(max_length=MAX_REALM_SUBDOMAIN_LENGTH, unique=True)
date_created: datetime.datetime = models.DateTimeField(default=timezone_now)
deactivated: bool = models.BooleanField(default=False)
# Redirect URL if the Realm has moved to another server
deactivated_redirect = models.URLField(max_length=MAX_REALM_REDIRECT_URL_LENGTH, null=True)
# See RealmDomain for the domains that apply for a given organization.
emails_restricted_to_domains: bool = models.BooleanField(default=False)
invite_required: bool = models.BooleanField(default=True)
invite_by_admins_only: bool = models.BooleanField(default=False)
_max_invites: Optional[int] = models.IntegerField(null=True, db_column='max_invites')
disallow_disposable_email_addresses: bool = models.BooleanField(default=True)
authentication_methods: BitHandler = BitField(
flags=AUTHENTICATION_FLAGS, default=2**31 - 1,
)
# Whether the organization has enabled inline image and URL previews.
inline_image_preview: bool = models.BooleanField(default=True)
inline_url_embed_preview: bool = models.BooleanField(default=False)
# Whether digest emails are enabled for the organization.
digest_emails_enabled: bool = models.BooleanField(default=False)
# Day of the week on which the digest is sent (default: Tuesday).
digest_weekday: int = models.SmallIntegerField(default=1)
send_welcome_emails: bool = models.BooleanField(default=True)
message_content_allowed_in_email_notifications: bool = models.BooleanField(default=True)
mandatory_topics: bool = models.BooleanField(default=False)
add_emoji_by_admins_only: bool = models.BooleanField(default=False)
name_changes_disabled: bool = models.BooleanField(default=False)
email_changes_disabled: bool = models.BooleanField(default=False)
avatar_changes_disabled: bool = models.BooleanField(default=False)
POLICY_MEMBERS_ONLY = 1
POLICY_ADMINS_ONLY = 2
POLICY_FULL_MEMBERS_ONLY = 3
COMMON_POLICY_TYPES = [
POLICY_MEMBERS_ONLY,
POLICY_ADMINS_ONLY,
POLICY_FULL_MEMBERS_ONLY,
]
# Who in the organization is allowed to create streams.
create_stream_policy: int = models.PositiveSmallIntegerField(
default=POLICY_MEMBERS_ONLY)
# Who in the organization is allowed to invite other users to streams.
invite_to_stream_policy: int = models.PositiveSmallIntegerField(
default=POLICY_MEMBERS_ONLY)
USER_GROUP_EDIT_POLICY_MEMBERS = 1
USER_GROUP_EDIT_POLICY_ADMINS = 2
user_group_edit_policy: int = models.PositiveSmallIntegerField(
default=USER_GROUP_EDIT_POLICY_MEMBERS)
USER_GROUP_EDIT_POLICY_TYPES = [
USER_GROUP_EDIT_POLICY_MEMBERS,
USER_GROUP_EDIT_POLICY_ADMINS,
]
PRIVATE_MESSAGE_POLICY_UNLIMITED = 1
PRIVATE_MESSAGE_POLICY_DISABLED = 2
private_message_policy: int = models.PositiveSmallIntegerField(
default=PRIVATE_MESSAGE_POLICY_UNLIMITED)
PRIVATE_MESSAGE_POLICY_TYPES = [
PRIVATE_MESSAGE_POLICY_UNLIMITED,
PRIVATE_MESSAGE_POLICY_DISABLED,
]
# Global policy for who is allowed to use wildcard mentions in
# streams with a large number of subscribers. Anyone can use
# wildcard mentions in small streams regardless of this setting.
WILDCARD_MENTION_POLICY_EVERYONE = 1
WILDCARD_MENTION_POLICY_MEMBERS = 2
WILDCARD_MENTION_POLICY_FULL_MEMBERS = 3
WILDCARD_MENTION_POLICY_STREAM_ADMINS = 4
WILDCARD_MENTION_POLICY_ADMINS = 5
WILDCARD_MENTION_POLICY_NOBODY = 6
wildcard_mention_policy: int = models.PositiveSmallIntegerField(
default=WILDCARD_MENTION_POLICY_STREAM_ADMINS,
)
WILDCARD_MENTION_POLICY_TYPES = [
WILDCARD_MENTION_POLICY_EVERYONE,
WILDCARD_MENTION_POLICY_MEMBERS,
WILDCARD_MENTION_POLICY_FULL_MEMBERS,
WILDCARD_MENTION_POLICY_STREAM_ADMINS,
WILDCARD_MENTION_POLICY_ADMINS,
WILDCARD_MENTION_POLICY_NOBODY,
]
# Who in the organization has access to users' actual email
# addresses. Controls whether the UserProfile.email field is the
# same as UserProfile.delivery_email, or is instead garbage.
EMAIL_ADDRESS_VISIBILITY_EVERYONE = 1
EMAIL_ADDRESS_VISIBILITY_MEMBERS = 2
EMAIL_ADDRESS_VISIBILITY_ADMINS = 3
EMAIL_ADDRESS_VISIBILITY_NOBODY = 4
email_address_visibility: int = models.PositiveSmallIntegerField(
default=EMAIL_ADDRESS_VISIBILITY_EVERYONE,
)
EMAIL_ADDRESS_VISIBILITY_TYPES = [
EMAIL_ADDRESS_VISIBILITY_EVERYONE,
# The MEMBERS level is not yet implemented on the backend.
## EMAIL_ADDRESS_VISIBILITY_MEMBERS,
EMAIL_ADDRESS_VISIBILITY_ADMINS,
EMAIL_ADDRESS_VISIBILITY_NOBODY,
]
# Threshold in days for new users to create streams, and potentially take
# some other actions.
waiting_period_threshold: int = models.PositiveIntegerField(default=0)
allow_message_deleting: bool = models.BooleanField(default=False)
DEFAULT_MESSAGE_CONTENT_DELETE_LIMIT_SECONDS = 600 # if changed, also change in admin.js, setting_org.js
message_content_delete_limit_seconds: int = models.IntegerField(
default=DEFAULT_MESSAGE_CONTENT_DELETE_LIMIT_SECONDS,
)
allow_message_editing: bool = models.BooleanField(default=True)
DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS = 600 # if changed, also change in admin.js, setting_org.js
message_content_edit_limit_seconds: int = models.IntegerField(
default=DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS,
)
# Whether users have access to message edit history
allow_edit_history: bool = models.BooleanField(default=True)
DEFAULT_COMMUNITY_TOPIC_EDITING_LIMIT_SECONDS = 86400
allow_community_topic_editing: bool = models.BooleanField(default=True)
# Defaults for new users
default_twenty_four_hour_time: bool = models.BooleanField(default=False)
default_language: str = models.CharField(default='en', max_length=MAX_LANGUAGE_ID_LENGTH)
DEFAULT_NOTIFICATION_STREAM_NAME = 'general'
INITIAL_PRIVATE_STREAM_NAME = 'core team'
STREAM_EVENTS_NOTIFICATION_TOPIC = ugettext_lazy('stream events')
notifications_stream: Optional["Stream"] = models.ForeignKey(
"Stream", related_name="+", null=True, blank=True, on_delete=CASCADE,
)
signup_notifications_stream: Optional["Stream"] = models.ForeignKey(
"Stream", related_name="+", null=True, blank=True, on_delete=CASCADE,
)
MESSAGE_RETENTION_SPECIAL_VALUES_MAP = {
'forever': -1,
}
# For old messages being automatically deleted
message_retention_days: int = models.IntegerField(null=False, default=-1)
# When non-null, all but the most recent `message_visibility_limit`
# messages in the organization are inaccessible to users (but not deleted).
message_visibility_limit: Optional[int] = models.IntegerField(null=True)
# Messages older than this message ID in the organization are inaccessible.
first_visible_message_id: int = models.IntegerField(default=0)
# Valid org_types are {CORPORATE, COMMUNITY}
CORPORATE = 1
COMMUNITY = 2
org_type: int = models.PositiveSmallIntegerField(default=CORPORATE)
UPGRADE_TEXT_STANDARD = ugettext_lazy("Available on Zulip Standard. Upgrade to access.")
# plan_type controls various features around resource/feature
# limitations for a Zulip organization on multi-tenant installations
# like Zulip Cloud.
SELF_HOSTED = 1
LIMITED = 2
STANDARD = 3
STANDARD_FREE = 4
plan_type: int = models.PositiveSmallIntegerField(default=SELF_HOSTED)
# This value is also being used in static/js/settings_bots.bot_creation_policy_values.
# On updating it here, update it there as well.
BOT_CREATION_EVERYONE = 1
BOT_CREATION_LIMIT_GENERIC_BOTS = 2
BOT_CREATION_ADMINS_ONLY = 3
bot_creation_policy: int = models.PositiveSmallIntegerField(default=BOT_CREATION_EVERYONE)
BOT_CREATION_POLICY_TYPES = [
BOT_CREATION_EVERYONE,
BOT_CREATION_LIMIT_GENERIC_BOTS,
BOT_CREATION_ADMINS_ONLY,
]
# See upload_quota_bytes; don't interpret upload_quota_gb directly.
UPLOAD_QUOTA_LIMITED = 5
UPLOAD_QUOTA_STANDARD = 50
upload_quota_gb: Optional[int] = models.IntegerField(null=True)
VIDEO_CHAT_PROVIDERS = {
'disabled': {
'name': "None",
'id': 0,
},
'jitsi_meet': {
'name': "Jitsi Meet",
'id': 1,
},
# ID 2 was used for the now-deleted Google Hangouts.
# ID 3 reserved for optional Zoom, see below.
# ID 4 reserved for optional Big Blue Button, see below.
}
if settings.VIDEO_ZOOM_CLIENT_ID is not None and settings.VIDEO_ZOOM_CLIENT_SECRET is not None:
VIDEO_CHAT_PROVIDERS['zoom'] = {
'name': "Zoom",
'id': 3,
}
if settings.BIG_BLUE_BUTTON_SECRET is not None and settings.BIG_BLUE_BUTTON_URL is not None:
VIDEO_CHAT_PROVIDERS['big_blue_button'] = {
'name': "Big Blue Button",
'id': 4
}
video_chat_provider: int = models.PositiveSmallIntegerField(default=VIDEO_CHAT_PROVIDERS['jitsi_meet']['id'])
default_code_block_language: Optional[str] = models.TextField(null=True, default=None)
# Define the types of the various automatically managed properties
property_types: Dict[str, Union[type, Tuple[type, ...]]] = dict(
add_emoji_by_admins_only=bool,
allow_edit_history=bool,
allow_message_deleting=bool,
bot_creation_policy=int,
create_stream_policy=int,
invite_to_stream_policy=int,
default_language=str,
default_twenty_four_hour_time=bool,
description=str,
digest_emails_enabled=bool,
disallow_disposable_email_addresses=bool,
email_address_visibility=int,
email_changes_disabled=bool,
invite_required=bool,
invite_by_admins_only=bool,
inline_image_preview=bool,
inline_url_embed_preview=bool,
mandatory_topics=bool,
message_retention_days=(int, type(None)),
name=str,
name_changes_disabled=bool,
avatar_changes_disabled=bool,
emails_restricted_to_domains=bool,
send_welcome_emails=bool,
message_content_allowed_in_email_notifications=bool,
video_chat_provider=int,
waiting_period_threshold=int,
digest_weekday=int,
private_message_policy=int,
user_group_edit_policy=int,
default_code_block_language=(str, type(None)),
message_content_delete_limit_seconds=int,
wildcard_mention_policy=int,
)
DIGEST_WEEKDAY_VALUES = [0, 1, 2, 3, 4, 5, 6]
# Icon is the square mobile icon.
ICON_FROM_GRAVATAR = 'G'
ICON_UPLOADED = 'U'
ICON_SOURCES = (
(ICON_FROM_GRAVATAR, 'Hosted by Gravatar'),
(ICON_UPLOADED, 'Uploaded by administrator'),
)
icon_source: str = models.CharField(
default=ICON_FROM_GRAVATAR, choices=ICON_SOURCES, max_length=1,
)
icon_version: int = models.PositiveSmallIntegerField(default=1)
# Logo is the horizontal logo we show in top-left of webapp navbar UI.
LOGO_DEFAULT = 'D'
LOGO_UPLOADED = 'U'
LOGO_SOURCES = (
(LOGO_DEFAULT, 'Default to Zulip'),
(LOGO_UPLOADED, 'Uploaded by administrator'),
)
logo_source: str = models.CharField(
default=LOGO_DEFAULT, choices=LOGO_SOURCES, max_length=1,
)
logo_version: int = models.PositiveSmallIntegerField(default=1)
night_logo_source: str = models.CharField(
default=LOGO_DEFAULT, choices=LOGO_SOURCES, max_length=1,
)
night_logo_version: int = models.PositiveSmallIntegerField(default=1)
def authentication_methods_dict(self) -> Dict[str, bool]:
"""Returns the a mapping from authentication flags to their status,
showing only those authentication flags that are supported on
the current server (i.e. if EmailAuthBackend is not configured
on the server, this will not return an entry for "Email")."""
# This mapping needs to be imported from here due to the cyclic
# dependency.
from zproject.backends import AUTH_BACKEND_NAME_MAP
ret: Dict[str, bool] = {}
supported_backends = [backend.__class__ for backend in supported_auth_backends()]
# `authentication_methods` is a bitfield.types.BitHandler, not
# a true dict; since it is still python2- and python3-compat,
# `iteritems` is its method to iterate over its contents.
for k, v in self.authentication_methods.iteritems():
backend = AUTH_BACKEND_NAME_MAP[k]
if backend in supported_backends:
ret[k] = v
return ret
def __str__(self) -> str:
return f"<Realm: {self.string_id} {self.id}>"
@cache_with_key(get_realm_emoji_cache_key, timeout=3600*24*7)
def get_emoji(self) -> Dict[str, Dict[str, Iterable[str]]]:
return get_realm_emoji_uncached(self)
@cache_with_key(get_active_realm_emoji_cache_key, timeout=3600*24*7)
def get_active_emoji(self) -> Dict[str, Dict[str, Iterable[str]]]:
return get_active_realm_emoji_uncached(self)
def get_admin_users_and_bots(self) -> Sequence['UserProfile']:
"""Use this in contexts where we want administrative users as well as
bots with administrator privileges, like send_event calls for
notifications to all administrator users.
"""
# TODO: Change return type to QuerySet[UserProfile]
return UserProfile.objects.filter(realm=self, is_active=True,
role__in=[UserProfile.ROLE_REALM_ADMINISTRATOR,
UserProfile.ROLE_REALM_OWNER])
def get_human_admin_users(self) -> QuerySet:
"""Use this in contexts where we want only human users with
administrative privileges, like sending an email to all of a
realm's administrators (bots don't have real email addresses).
"""
# TODO: Change return type to QuerySet[UserProfile]
return UserProfile.objects.filter(realm=self, is_bot=False, is_active=True,
role__in=[UserProfile.ROLE_REALM_ADMINISTRATOR,
UserProfile.ROLE_REALM_OWNER])
def get_human_billing_admin_users(self) -> Sequence['UserProfile']:
return UserProfile.objects.filter(Q(role=UserProfile.ROLE_REALM_OWNER) | Q(is_billing_admin=True),
realm=self, is_bot=False, is_active=True)
def get_active_users(self) -> Sequence['UserProfile']:
# TODO: Change return type to QuerySet[UserProfile]
return UserProfile.objects.filter(realm=self, is_active=True).select_related()
def get_human_owner_users(self) -> QuerySet:
return UserProfile.objects.filter(realm=self, is_bot=False,
role=UserProfile.ROLE_REALM_OWNER,
is_active=True)
def get_bot_domain(self) -> str:
return get_fake_email_domain()
def get_notifications_stream(self) -> Optional['Stream']:
if self.notifications_stream is not None and not self.notifications_stream.deactivated:
return self.notifications_stream
return None
def get_signup_notifications_stream(self) -> Optional['Stream']:
if self.signup_notifications_stream is not None and not self.signup_notifications_stream.deactivated:
return self.signup_notifications_stream
return None
@property
def max_invites(self) -> int:
if self._max_invites is None:
return settings.INVITES_DEFAULT_REALM_DAILY_MAX
return self._max_invites
@max_invites.setter
def max_invites(self, value: Optional[int]) -> None:
self._max_invites = value
def upload_quota_bytes(self) -> Optional[int]:
if self.upload_quota_gb is None:
return None
# We describe the quota to users in "GB" or "gigabytes", but actually apply
# it as gibibytes (GiB) to be a bit more generous in case of confusion.
return self.upload_quota_gb << 30
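# Worked example: upload_quota_gb = 5 yields 5 << 30 == 5 * 2**30
# == 5,368,709,120 bytes (5 GiB, about 5.37 decimal GB), slightly more
# than a strict decimal reading of "5 GB" (5 * 10**9 bytes) would allow.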
@cache_with_key(get_realm_used_upload_space_cache_key, timeout=3600*24*7)
def currently_used_upload_space_bytes(self) -> int:
used_space = Attachment.objects.filter(realm=self).aggregate(Sum('size'))['size__sum']
if used_space is None:
return 0
return used_space
def ensure_not_on_limited_plan(self) -> None:
if self.plan_type == Realm.LIMITED:
raise JsonableError(self.UPGRADE_TEXT_STANDARD)
@property
def subdomain(self) -> str:
return self.string_id
@property
def display_subdomain(self) -> str:
"""Likely to be temporary function to avoid signup messages being sent
to an empty topic"""
if self.string_id == "":
return "."
return self.string_id
@property
def uri(self) -> str:
return settings.EXTERNAL_URI_SCHEME + self.host
@property
def host(self) -> str:
# Use mark sanitized to prevent false positives from Pysa thinking that
# the host is user controlled.
return mark_sanitized(self.host_for_subdomain(self.subdomain))
@staticmethod
def host_for_subdomain(subdomain: str) -> str:
if subdomain == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
return settings.EXTERNAL_HOST
default_host = f"{subdomain}.{settings.EXTERNAL_HOST}"
return settings.REALM_HOSTS.get(subdomain, default_host)
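# Hypothetical examples (settings values invented for illustration): with
# EXTERNAL_HOST = 'zulip.example.com' and REALM_HOSTS = {'acme': 'chat.acme.com'},
# and assuming SUBDOMAIN_FOR_ROOT_DOMAIN == '' (the usual convention):
#
#     Realm.host_for_subdomain('')     -> 'zulip.example.com'        (root domain)
#     Realm.host_for_subdomain('dev')  -> 'dev.zulip.example.com'
#     Realm.host_for_subdomain('acme') -> 'chat.acme.com'            (REALM_HOSTS override)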
@property
def is_zephyr_mirror_realm(self) -> bool:
return self.string_id == "zephyr"
@property
def webathena_enabled(self) -> bool:
return self.is_zephyr_mirror_realm
@property
def presence_disabled(self) -> bool:
return self.is_zephyr_mirror_realm
def realm_post_delete_handler(sender: Any, **kwargs: Any) -> None:
# This would be better as a functools.partial, but for some reason
# Django doesn't call it even when it's registered as a post_delete handler.
flush_realm(sender, from_deletion=True, **kwargs)
post_save.connect(flush_realm, sender=Realm)
post_delete.connect(realm_post_delete_handler, sender=Realm)
def get_realm(string_id: str) -> Realm:
return Realm.objects.get(string_id=string_id)
def name_changes_disabled(realm: Optional[Realm]) -> bool:
if realm is None:
return settings.NAME_CHANGES_DISABLED
return settings.NAME_CHANGES_DISABLED or realm.name_changes_disabled
def avatar_changes_disabled(realm: Realm) -> bool:
return settings.AVATAR_CHANGES_DISABLED or realm.avatar_changes_disabled
class RealmDomain(models.Model):
"""For an organization with emails_restricted_to_domains enabled, the list of
allowed domains"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
# should always be stored lowercase
domain: str = models.CharField(max_length=80, db_index=True)
allow_subdomains: bool = models.BooleanField(default=False)
class Meta:
unique_together = ("realm", "domain")
# These functions should only be used on email addresses that have
# been validated via django.core.validators.validate_email
#
# Note that we need to use some care, since you can have multiple @-signs; e.g.
# "tabbott@test"@zulip.com
# is a valid email address
def email_to_username(email: str) -> str:
return "@".join(email.split("@")[:-1]).lower()
# Returns the raw domain portion of the desired email address
def email_to_domain(email: str) -> str:
return email.split("@")[-1].lower()
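# Worked example of the multiple-@ caveat above (illustrative only):
#
#     >>> email_to_username('"tabbott@test"@zulip.com')
#     '"tabbott@test"'
#     >>> email_to_domain('"tabbott@test"@zulip.com')
#     'zulip.com'
#
# Splitting on the *last* "@" is what makes the quoted local part safe.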
class DomainNotAllowedForRealmError(Exception):
pass
class DisposableEmailError(Exception):
pass
class EmailContainsPlusError(Exception):
pass
def get_realm_domains(realm: Realm) -> List[Dict[str, str]]:
return list(realm.realmdomain_set.values('domain', 'allow_subdomains'))
class RealmEmoji(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
author: Optional["UserProfile"] = models.ForeignKey(
"UserProfile", blank=True, null=True, on_delete=CASCADE,
)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
name: str = models.TextField(validators=[
MinLengthValidator(1),
# The second part of the regex (negative lookbehind) disallows names
# ending with one of the punctuation characters.
RegexValidator(regex=r'^[0-9a-z.\-_]+(?<![.\-_])$',
message=ugettext_lazy("Invalid characters in emoji name"))])
# The basename of the custom emoji's filename; see PATH_ID_TEMPLATE for the full path.
file_name: Optional[str] = models.TextField(db_index=True, null=True, blank=True)
deactivated: bool = models.BooleanField(default=False)
PATH_ID_TEMPLATE = "{realm_id}/emoji/images/{emoji_file_name}"
def __str__(self) -> str:
return f"<RealmEmoji({self.realm.string_id}): {self.id} {self.name} {self.deactivated} {self.file_name}>"
def get_realm_emoji_dicts(realm: Realm,
only_active_emojis: bool=False) -> Dict[str, Dict[str, Any]]:
query = RealmEmoji.objects.filter(realm=realm).select_related('author')
if only_active_emojis:
query = query.filter(deactivated=False)
d = {}
from zerver.lib.emoji import get_emoji_url
for realm_emoji in query.all():
author_id = None
if realm_emoji.author:
author_id = realm_emoji.author_id
emoji_url = get_emoji_url(realm_emoji.file_name, realm_emoji.realm_id)
d[str(realm_emoji.id)] = dict(id=str(realm_emoji.id),
name=realm_emoji.name,
source_url=emoji_url,
deactivated=realm_emoji.deactivated,
author_id=author_id)
return d
def get_realm_emoji_uncached(realm: Realm) -> Dict[str, Dict[str, Any]]:
return get_realm_emoji_dicts(realm)
def get_active_realm_emoji_uncached(realm: Realm) -> Dict[str, Dict[str, Any]]:
realm_emojis = get_realm_emoji_dicts(realm, only_active_emojis=True)
d = {}
for emoji_id, emoji_dict in realm_emojis.items():
d[emoji_dict['name']] = emoji_dict
return d
def flush_realm_emoji(sender: Any, **kwargs: Any) -> None:
realm = kwargs['instance'].realm
cache_set(get_realm_emoji_cache_key(realm),
get_realm_emoji_uncached(realm),
timeout=3600*24*7)
cache_set(get_active_realm_emoji_cache_key(realm),
get_active_realm_emoji_uncached(realm),
timeout=3600*24*7)
post_save.connect(flush_realm_emoji, sender=RealmEmoji)
post_delete.connect(flush_realm_emoji, sender=RealmEmoji)
def filter_pattern_validator(value: str) -> None:
regex = re.compile(r'^(?:(?:[\w\-#_= /:]*|[+]|[!])(\(\?P<\w+>.+\)))+$')
error_msg = _('Invalid filter pattern. Valid characters are {}.').format(
'[ a-zA-Z_#=/:+!-]',)
if not regex.match(str(value)):
raise ValidationError(error_msg)
try:
re.compile(value)
except re.error:
# Regex is invalid
raise ValidationError(error_msg)
def filter_format_validator(value: str) -> None:
regex = re.compile(r'^([\.\/:a-zA-Z0-9#_?=&;~-]+%\(([a-zA-Z0-9_-]+)\)s)+[/a-zA-Z0-9#_?=&;~-]*$')
if not regex.match(value):
raise ValidationError(_('Invalid URL format string.'))
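# Example of a pattern/format pair these validators accept (a sketch, not
# shipped configuration): linkifying "#1234"-style issue references:
#
#     pattern           = r'#(?P<id>[0-9]+)'
#     url_format_string = 'https://github.com/zulip/zulip/issues/%(id)s'
#
# filter_pattern_validator requires at least one named group, and
# filter_format_validator requires, roughly, a URL-shaped string containing
# at least one %(name)s substitution.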
class RealmFilter(models.Model):
"""Realm-specific regular expressions to automatically linkify certain
strings inside the Markdown processor. See "Custom filters" in the settings UI.
"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
pattern: str = models.TextField(validators=[filter_pattern_validator])
url_format_string: str = models.TextField(validators=[URLValidator(), filter_format_validator])
class Meta:
unique_together = ("realm", "pattern")
def __str__(self) -> str:
return f"<RealmFilter({self.realm.string_id}): {self.pattern} {self.url_format_string}>"
def get_realm_filters_cache_key(realm_id: int) -> str:
return f'{cache.KEY_PREFIX}:all_realm_filters:{realm_id}'
# We have a per-process cache to avoid doing 1000 remote cache queries during page load
per_request_realm_filters_cache: Dict[int, List[Tuple[str, str, int]]] = {}
def realm_in_local_realm_filters_cache(realm_id: int) -> bool:
return realm_id in per_request_realm_filters_cache
def realm_filters_for_realm(realm_id: int) -> List[Tuple[str, str, int]]:
if not realm_in_local_realm_filters_cache(realm_id):
per_request_realm_filters_cache[realm_id] = realm_filters_for_realm_remote_cache(realm_id)
return per_request_realm_filters_cache[realm_id]
@cache_with_key(get_realm_filters_cache_key, timeout=3600*24*7)
def realm_filters_for_realm_remote_cache(realm_id: int) -> List[Tuple[str, str, int]]:
filters = []
for realm_filter in RealmFilter.objects.filter(realm_id=realm_id):
filters.append((realm_filter.pattern, realm_filter.url_format_string, realm_filter.id))
return filters
def all_realm_filters() -> Dict[int, List[Tuple[str, str, int]]]:
filters: DefaultDict[int, List[Tuple[str, str, int]]] = defaultdict(list)
for realm_filter in RealmFilter.objects.all():
filters[realm_filter.realm_id].append((realm_filter.pattern,
realm_filter.url_format_string,
realm_filter.id))
return filters
def flush_realm_filter(sender: Any, **kwargs: Any) -> None:
realm_id = kwargs['instance'].realm_id
cache_delete(get_realm_filters_cache_key(realm_id))
try:
per_request_realm_filters_cache.pop(realm_id)
except KeyError:
pass
post_save.connect(flush_realm_filter, sender=RealmFilter)
post_delete.connect(flush_realm_filter, sender=RealmFilter)
def flush_per_request_caches() -> None:
global per_request_display_recipient_cache
per_request_display_recipient_cache = {}
global per_request_realm_filters_cache
per_request_realm_filters_cache = {}
# The Recipient table is used to map Messages to the set of users who
# received the message. It is implemented as a set of triples (id,
# type_id, type). We have 3 types of recipients: Huddles (for group
# private messages), UserProfiles (for 1:1 private messages), and
# Streams. The recipient table maps a globally unique recipient id
# (used by the Message table) to the type-specific unique id (the
# stream id, user_profile id, or huddle id).
class Recipient(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
type_id: int = models.IntegerField(db_index=True)
type: int = models.PositiveSmallIntegerField(db_index=True)
# Valid types are {personal, stream, huddle}
PERSONAL = 1
STREAM = 2
HUDDLE = 3
class Meta:
unique_together = ("type", "type_id")
# N.B. If we used Django's choice=... we would get this for free (kinda)
_type_names = {
PERSONAL: 'personal',
STREAM: 'stream',
HUDDLE: 'huddle'}
def type_name(self) -> str:
# Raises KeyError if invalid
return self._type_names[self.type]
def __str__(self) -> str:
display_recipient = get_display_recipient(self)
return f"<Recipient: {display_recipient} ({self.type_id}, {self.type})>"
class UserProfile(AbstractBaseUser, PermissionsMixin):
USERNAME_FIELD = 'email'
MAX_NAME_LENGTH = 100
MIN_NAME_LENGTH = 2
API_KEY_LENGTH = 32
NAME_INVALID_CHARS = ['*', '`', "\\", '>', '"', '@']
DEFAULT_BOT = 1
"""
Incoming webhook bots are limited to only sending messages via webhooks.
Thus, it is less of a security risk to expose their API keys to third-party services,
since they can't be used to read messages.
"""
INCOMING_WEBHOOK_BOT = 2
# This value is also used in static/js/settings_bots.js; when
# updating it here, update it there as well.
OUTGOING_WEBHOOK_BOT = 3
"""
Embedded bots run within the Zulip server itself; events are added to the
embedded_bots queue and then handled by a QueueProcessingWorker.
"""
EMBEDDED_BOT = 4
BOT_TYPES = {
DEFAULT_BOT: 'Generic bot',
INCOMING_WEBHOOK_BOT: 'Incoming webhook',
OUTGOING_WEBHOOK_BOT: 'Outgoing webhook',
EMBEDDED_BOT: 'Embedded bot',
}
SERVICE_BOT_TYPES = [
OUTGOING_WEBHOOK_BOT,
EMBEDDED_BOT,
]
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
# For historical reasons, Zulip has two email fields. The
# `delivery_email` field is the user's email address, where all
# email notifications will be sent, and is used for all
# authentication use cases.
#
# The `email` field is the same as delivery_email in organizations
# with EMAIL_ADDRESS_VISIBILITY_EVERYONE. For other
# organizations, it will be a unique value of the form
# user1234@example.com. This field exists for backwards
# compatibility in Zulip APIs where users are referred to by their
# email address, not their ID; it should be used in all API use cases.
#
# Both fields are unique within a realm (in a case-insensitive fashion).
delivery_email: str = models.EmailField(blank=False, db_index=True)
email: str = models.EmailField(blank=False, db_index=True)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
# Foreign key to the Recipient object for PERSONAL type messages to this user.
recipient = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)
# The user's name. We prefer the model of a full_name
# over first+last because cultures vary on how many
# names one has, whether the family name is first or last, etc.
# It also allows organizations to encode a bit of non-name data in
# the "name" attribute if desired, like gender pronouns,
# graduation year, etc.
full_name: str = models.CharField(max_length=MAX_NAME_LENGTH)
date_joined: datetime.datetime = models.DateTimeField(default=timezone_now)
tos_version: Optional[str] = models.CharField(null=True, max_length=10)
api_key: str = models.CharField(max_length=API_KEY_LENGTH)
# Whether the user has access to server-level administrator pages, like /activity
is_staff: bool = models.BooleanField(default=False)
# For a normal user, this is True unless the user or an admin has
# deactivated their account. The name comes from Django; this field
# isn't related to presence or to whether the user has recently used Zulip.
#
# See also `long_term_idle`.
is_active: bool = models.BooleanField(default=True, db_index=True)
is_billing_admin: bool = models.BooleanField(default=False, db_index=True)
is_bot: bool = models.BooleanField(default=False, db_index=True)
bot_type: Optional[int] = models.PositiveSmallIntegerField(null=True, db_index=True)
bot_owner: Optional["UserProfile"] = models.ForeignKey('self', null=True, on_delete=models.SET_NULL)
# Each role has a superset of the permissions of the next higher
# numbered role. When adding new roles, leave enough space for
# future roles to be inserted between currently adjacent
# roles. These constants appear in RealmAuditLog.extra_data, so
# changes to them will require a migration of RealmAuditLog.
ROLE_REALM_OWNER = 100
ROLE_REALM_ADMINISTRATOR = 200
# ROLE_MODERATOR = 300
ROLE_MEMBER = 400
ROLE_GUEST = 600
role: int = models.PositiveSmallIntegerField(default=ROLE_MEMBER, db_index=True)
ROLE_TYPES = [
ROLE_REALM_OWNER,
ROLE_REALM_ADMINISTRATOR,
ROLE_MEMBER,
ROLE_GUEST,
]
# Whether the user has been "soft-deactivated" due to weeks of inactivity.
# For these users we avoid doing UserMessage table work, as an optimization
# for large Zulip organizations with lots of single-visit users.
long_term_idle: bool = models.BooleanField(default=False, db_index=True)
# When we last added basic UserMessage rows for a long_term_idle user.
last_active_message_id: Optional[int] = models.IntegerField(null=True)
# Mirror dummies are fake (!is_active) users used to provide
# message senders in our cross-protocol Zephyr<->Zulip content
# mirroring integration, so that we can display mirrored content
# like native Zulip messages (with a name + avatar, etc.).
is_mirror_dummy: bool = models.BooleanField(default=False)
# Users with this flag set are allowed to forge messages as sent by another
# user and to send to private streams; also used for Zephyr/Jabber mirroring.
can_forge_sender: bool = models.BooleanField(default=False, db_index=True)
# Users with this flag set can create other users via API.
can_create_users: bool = models.BooleanField(default=False, db_index=True)
### Notifications settings. ###
# Stream notifications.
enable_stream_desktop_notifications: bool = models.BooleanField(default=False)
enable_stream_email_notifications: bool = models.BooleanField(default=False)
enable_stream_push_notifications: bool = models.BooleanField(default=False)
enable_stream_audible_notifications: bool = models.BooleanField(default=False)
notification_sound: str = models.CharField(max_length=20, default='zulip')
wildcard_mentions_notify: bool = models.BooleanField(default=True)
# PM + @-mention notifications.
enable_desktop_notifications: bool = models.BooleanField(default=True)
pm_content_in_desktop_notifications: bool = models.BooleanField(default=True)
enable_sounds: bool = models.BooleanField(default=True)
enable_offline_email_notifications: bool = models.BooleanField(default=True)
message_content_in_email_notifications: bool = models.BooleanField(default=True)
enable_offline_push_notifications: bool = models.BooleanField(default=True)
enable_online_push_notifications: bool = models.BooleanField(default=True)
DESKTOP_ICON_COUNT_DISPLAY_MESSAGES = 1
DESKTOP_ICON_COUNT_DISPLAY_NOTIFIABLE = 2
DESKTOP_ICON_COUNT_DISPLAY_NONE = 3
desktop_icon_count_display: int = models.PositiveSmallIntegerField(
default=DESKTOP_ICON_COUNT_DISPLAY_MESSAGES)
enable_digest_emails: bool = models.BooleanField(default=True)
enable_login_emails: bool = models.BooleanField(default=True)
realm_name_in_notifications: bool = models.BooleanField(default=False)
presence_enabled: bool = models.BooleanField(default=True)
# Used for rate-limiting certain automated messages generated by bots
last_reminder: Optional[datetime.datetime] = models.DateTimeField(default=None, null=True)
# Minutes to wait before warning a bot owner that their bot sent a message
# to a nonexistent stream
BOT_OWNER_STREAM_ALERT_WAITPERIOD = 1
# API rate limits, formatted as a comma-separated list of range:max pairs
rate_limits: str = models.CharField(default="", max_length=100)
# Hours to wait before sending another email to a user
EMAIL_REMINDER_WAITPERIOD = 24
# Default streams for some deprecated/legacy classes of bot users.
default_sending_stream: Optional["Stream"] = models.ForeignKey(
"zerver.Stream", null=True, related_name="+", on_delete=CASCADE,
)
default_events_register_stream: Optional["Stream"] = models.ForeignKey(
"zerver.Stream", null=True, related_name="+", on_delete=CASCADE,
)
default_all_public_streams: bool = models.BooleanField(default=False)
# UI vars
enter_sends: Optional[bool] = models.BooleanField(null=True, default=False)
left_side_userlist: bool = models.BooleanField(default=False)
# display settings
default_language: str = models.CharField(default='en', max_length=MAX_LANGUAGE_ID_LENGTH)
dense_mode: bool = models.BooleanField(default=True)
fluid_layout_width: bool = models.BooleanField(default=False)
high_contrast_mode: bool = models.BooleanField(default=False)
translate_emoticons: bool = models.BooleanField(default=False)
twenty_four_hour_time: bool = models.BooleanField(default=False)
starred_message_counts: bool = models.BooleanField(default=False)
COLOR_SCHEME_AUTOMATIC = 1
COLOR_SCHEME_NIGHT = 2
COLOR_SCHEME_LIGHT = 3
COLOR_SCHEME_CHOICES = [
COLOR_SCHEME_AUTOMATIC,
COLOR_SCHEME_NIGHT,
COLOR_SCHEME_LIGHT
]
color_scheme: int = models.PositiveSmallIntegerField(default=COLOR_SCHEME_AUTOMATIC)
# UI setting controlling Zulip's behavior of demoting in the sort
# order and graying out streams with no recent traffic. The
# default behavior, automatic, enables this behavior once a user
# is subscribed to 30+ streams in the webapp.
DEMOTE_STREAMS_AUTOMATIC = 1
DEMOTE_STREAMS_ALWAYS = 2
DEMOTE_STREAMS_NEVER = 3
DEMOTE_STREAMS_CHOICES = [
DEMOTE_STREAMS_AUTOMATIC,
DEMOTE_STREAMS_ALWAYS,
DEMOTE_STREAMS_NEVER,
]
demote_inactive_streams: int = models.PositiveSmallIntegerField(default=DEMOTE_STREAMS_AUTOMATIC)
# A timezone name from the `tzdata` database, as found in pytz.all_timezones.
#
# The longest existing name is 32 characters long, so max_length=40 seems
# like a safe choice.
#
# In Django, the convention is to use an empty string instead of NULL/None
# for text-based fields. For more information, see
# https://docs.djangoproject.com/en/1.10/ref/models/fields/#django.db.models.Field.null.
timezone: str = models.CharField(max_length=40, default='')
# Emojisets
GOOGLE_EMOJISET = 'google'
GOOGLE_BLOB_EMOJISET = 'google-blob'
TEXT_EMOJISET = 'text'
TWITTER_EMOJISET = 'twitter'
EMOJISET_CHOICES = ((GOOGLE_EMOJISET, "Google modern"),
(GOOGLE_BLOB_EMOJISET, "Google classic"),
(TWITTER_EMOJISET, "Twitter"),
(TEXT_EMOJISET, "Plain text"))
emojiset: str = models.CharField(default=GOOGLE_BLOB_EMOJISET, choices=EMOJISET_CHOICES, max_length=20)
AVATAR_FROM_GRAVATAR = 'G'
AVATAR_FROM_USER = 'U'
AVATAR_SOURCES = (
(AVATAR_FROM_GRAVATAR, 'Hosted by Gravatar'),
(AVATAR_FROM_USER, 'Uploaded by user'),
)
avatar_source: str = models.CharField(default=AVATAR_FROM_GRAVATAR, choices=AVATAR_SOURCES, max_length=1)
avatar_version: int = models.PositiveSmallIntegerField(default=1)
avatar_hash: Optional[str] = models.CharField(null=True, max_length=64)
TUTORIAL_WAITING = 'W'
TUTORIAL_STARTED = 'S'
TUTORIAL_FINISHED = 'F'
TUTORIAL_STATES = ((TUTORIAL_WAITING, "Waiting"),
(TUTORIAL_STARTED, "Started"),
(TUTORIAL_FINISHED, "Finished"))
tutorial_status: str = models.CharField(default=TUTORIAL_WAITING, choices=TUTORIAL_STATES, max_length=1)
# Contains serialized JSON of the form:
# [("step 1", true), ("step 2", false)]
# where the second element of each tuple is whether the step has
# been completed.
onboarding_steps: str = models.TextField(default='[]')
zoom_token: Optional[object] = JSONField(default=None, null=True)
objects: UserManager = UserManager()
# Define the types of the various automatically managed properties
property_types = dict(
color_scheme=int,
default_language=str,
demote_inactive_streams=int,
dense_mode=bool,
emojiset=str,
fluid_layout_width=bool,
high_contrast_mode=bool,
left_side_userlist=bool,
starred_message_counts=bool,
timezone=str,
translate_emoticons=bool,
twenty_four_hour_time=bool,
)
notification_setting_types = dict(
enable_desktop_notifications=bool,
enable_digest_emails=bool,
enable_login_emails=bool,
enable_offline_email_notifications=bool,
enable_offline_push_notifications=bool,
enable_online_push_notifications=bool,
enable_sounds=bool,
enable_stream_desktop_notifications=bool,
enable_stream_email_notifications=bool,
enable_stream_push_notifications=bool,
enable_stream_audible_notifications=bool,
wildcard_mentions_notify=bool,
message_content_in_email_notifications=bool,
notification_sound=str,
pm_content_in_desktop_notifications=bool,
desktop_icon_count_display=int,
realm_name_in_notifications=bool,
presence_enabled=bool,
)
ROLE_ID_TO_NAME_MAP = {
ROLE_REALM_OWNER: ugettext_lazy("Organization owner"),
ROLE_REALM_ADMINISTRATOR: ugettext_lazy("Organization administrator"),
ROLE_MEMBER: ugettext_lazy("Member"),
ROLE_GUEST: ugettext_lazy("Guest"),
}
def get_role_name(self) -> str:
return self.ROLE_ID_TO_NAME_MAP[self.role]
@property
def profile_data(self) -> ProfileData:
values = CustomProfileFieldValue.objects.filter(user_profile=self)
user_data = {v.field_id: {"value": v.value, "rendered_value": v.rendered_value} for v in values}
data: ProfileData = []
for field in custom_profile_fields_for_realm(self.realm_id):
field_values = user_data.get(field.id, None)
if field_values:
value, rendered_value = field_values.get("value"), field_values.get("rendered_value")
else:
value, rendered_value = None, None
field_type = field.field_type
if value is not None:
converter = field.FIELD_CONVERTERS[field_type]
value = converter(value)
field_data = field.as_dict()
data.append({
'id': field_data['id'],
'name': field_data['name'],
'type': field_data['type'],
'hint': field_data['hint'],
'field_data': field_data['field_data'],
'order': field_data['order'],
'value': value,
'rendered_value': rendered_value,
})
return data
def can_admin_user(self, target_user: 'UserProfile') -> bool:
"""Returns whether this user has permission to modify target_user"""
if target_user.bot_owner == self:
return True
elif self.is_realm_admin and self.realm == target_user.realm:
return True
else:
return False
def __str__(self) -> str:
return f"<UserProfile: {self.email} {self.realm}>"
@property
def is_new_member(self) -> bool:
diff = (timezone_now() - self.date_joined).days
if diff < self.realm.waiting_period_threshold:
return True
return False
@property
def is_realm_admin(self) -> bool:
return self.role == UserProfile.ROLE_REALM_ADMINISTRATOR or \
self.role == UserProfile.ROLE_REALM_OWNER
@is_realm_admin.setter
def is_realm_admin(self, value: bool) -> None:
if value:
self.role = UserProfile.ROLE_REALM_ADMINISTRATOR
elif self.role == UserProfile.ROLE_REALM_ADMINISTRATOR:
# We need to be careful to not accidentally change
# ROLE_GUEST to ROLE_MEMBER here.
self.role = UserProfile.ROLE_MEMBER
@property
def has_billing_access(self) -> bool:
return self.is_realm_owner or self.is_billing_admin
@property
def is_realm_owner(self) -> bool:
return self.role == UserProfile.ROLE_REALM_OWNER
@property
def is_guest(self) -> bool:
return self.role == UserProfile.ROLE_GUEST
@is_guest.setter
def is_guest(self, value: bool) -> None:
if value:
self.role = UserProfile.ROLE_GUEST
elif self.role == UserProfile.ROLE_GUEST:
# We need to be careful to not accidentally change
# ROLE_REALM_ADMINISTRATOR to ROLE_MEMBER here.
self.role = UserProfile.ROLE_MEMBER
@property
def is_incoming_webhook(self) -> bool:
return self.bot_type == UserProfile.INCOMING_WEBHOOK_BOT
@property
def allowed_bot_types(self) -> List[int]:
allowed_bot_types = []
if self.is_realm_admin or \
self.realm.bot_creation_policy != Realm.BOT_CREATION_LIMIT_GENERIC_BOTS:
allowed_bot_types.append(UserProfile.DEFAULT_BOT)
allowed_bot_types += [
UserProfile.INCOMING_WEBHOOK_BOT,
UserProfile.OUTGOING_WEBHOOK_BOT,
]
if settings.EMBEDDED_BOTS_ENABLED:
allowed_bot_types.append(UserProfile.EMBEDDED_BOT)
return allowed_bot_types
@staticmethod
def emojiset_choices() -> List[Dict[str, str]]:
return [dict(key=emojiset[0], text=emojiset[1]) for emojiset in UserProfile.EMOJISET_CHOICES]
@staticmethod
def emails_from_ids(user_ids: Sequence[int]) -> Dict[int, str]:
rows = UserProfile.objects.filter(id__in=user_ids).values('id', 'email')
return {row['id']: row['email'] for row in rows}
def email_address_is_realm_public(self) -> bool:
if self.realm.email_address_visibility == Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE:
return True
if self.is_bot:
return True
return False
def has_permission(self, policy_name: str) -> bool:
if policy_name not in ['create_stream_policy', 'invite_to_stream_policy']:
raise AssertionError("Invalid policy")
if self.is_realm_admin:
return True
policy_value = getattr(self.realm, policy_name)
if policy_value == Realm.POLICY_ADMINS_ONLY:
return False
if self.is_guest:
return False
if policy_value == Realm.POLICY_MEMBERS_ONLY:
return True
return not self.is_new_member
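# Sketch of the decision ladder above (policy names are the Realm
# constants referenced in has_permission):
#
#     admin                         -> True (always)
#     POLICY_ADMINS_ONLY, non-admin -> False
#     guest                         -> False
#     POLICY_MEMBERS_ONLY           -> True
#     otherwise ("full members")    -> True unless is_new_member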
def can_create_streams(self) -> bool:
return self.has_permission('create_stream_policy')
def can_subscribe_other_users(self) -> bool:
return self.has_permission('invite_to_stream_policy')
def can_access_public_streams(self) -> bool:
return not (self.is_guest or self.realm.is_zephyr_mirror_realm)
def can_access_all_realm_members(self) -> bool:
return not (self.realm.is_zephyr_mirror_realm or self.is_guest)
def major_tos_version(self) -> int:
if self.tos_version is not None:
return int(self.tos_version.split('.')[0])
else:
return -1
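# Doctest-style sketch: tos_version '1.0' -> 1, '2.3' -> 2, and None -> -1
# (the sentinel for "never agreed to any terms of service").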
def format_requestor_for_logs(self) -> str:
return "{}@{}".format(self.id, self.realm.string_id or 'root')
def set_password(self, password: Optional[str]) -> None:
if password is None:
self.set_unusable_password()
return
from zproject.backends import check_password_strength
if not check_password_strength(password):
raise PasswordTooWeakError
super().set_password(password)
class PasswordTooWeakError(Exception):
pass
class UserGroup(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
name: str = models.CharField(max_length=100)
members: Manager = models.ManyToManyField(UserProfile, through='UserGroupMembership')
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
description: str = models.TextField(default='')
class Meta:
unique_together = (('realm', 'name'),)
class UserGroupMembership(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_group: UserGroup = models.ForeignKey(UserGroup, on_delete=CASCADE)
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
class Meta:
unique_together = (('user_group', 'user_profile'),)
def receives_offline_push_notifications(user_profile: UserProfile) -> bool:
return (user_profile.enable_offline_push_notifications and
not user_profile.is_bot)
def receives_offline_email_notifications(user_profile: UserProfile) -> bool:
return (user_profile.enable_offline_email_notifications and
not user_profile.is_bot)
def receives_online_notifications(user_profile: UserProfile) -> bool:
return (user_profile.enable_online_push_notifications and
not user_profile.is_bot)
def receives_stream_notifications(user_profile: UserProfile) -> bool:
return (user_profile.enable_stream_push_notifications and
not user_profile.is_bot)
def remote_user_to_email(remote_user: str) -> str:
if settings.SSO_APPEND_DOMAIN is not None:
remote_user += "@" + settings.SSO_APPEND_DOMAIN
return remote_user
# Make sure we flush the UserProfile object from our remote cache
# whenever we save it.
post_save.connect(flush_user_profile, sender=UserProfile)
class PreregistrationUser(models.Model):
# Data on a partially created user, before the completion of
# registration. This is used in at least three major code paths:
# * Realm creation, in which case realm is None.
#
# * Invitations, in which case referred_by will always be set.
#
# * Social authentication signup, where it's used to store data
# from the authentication step and pass it to the registration
# form.
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
email: str = models.EmailField()
# If the pre-registration process provides a suggested full name for this user,
# store it here to use it to prepopulate the Full Name field in the registration form:
full_name: Optional[str] = models.CharField(max_length=UserProfile.MAX_NAME_LENGTH, null=True)
full_name_validated: bool = models.BooleanField(default=False)
referred_by: Optional[UserProfile] = models.ForeignKey(UserProfile, null=True, on_delete=CASCADE)
streams: Manager = models.ManyToManyField('Stream')
invited_at: datetime.datetime = models.DateTimeField(auto_now=True)
realm_creation: bool = models.BooleanField(default=False)
# Indicates whether the user needs a password. Users who were
# created via SSO style auth (e.g. GitHub/Google) generally do not.
password_required: bool = models.BooleanField(default=True)
# status: whether an object has been confirmed.
# If confirmed, this is set to confirmation.settings.STATUS_ACTIVE.
status: int = models.IntegerField(default=0)
# The realm should only ever be None for PreregistrationUser
# objects created as part of realm creation.
realm: Optional[Realm] = models.ForeignKey(Realm, null=True, on_delete=CASCADE)
# Changes to INVITE_AS should also be reflected in
# settings_invites.invited_as_values in
# static/js/settings_invites.js
INVITE_AS = dict(
REALM_OWNER = 100,
REALM_ADMIN = 200,
MEMBER = 400,
GUEST_USER = 600,
)
invited_as: int = models.PositiveSmallIntegerField(default=INVITE_AS['MEMBER'])
def filter_to_valid_prereg_users(query: QuerySet) -> QuerySet:
days_to_activate = settings.INVITATION_LINK_VALIDITY_DAYS
active_value = confirmation_settings.STATUS_ACTIVE
revoked_value = confirmation_settings.STATUS_REVOKED
lowest_datetime = timezone_now() - datetime.timedelta(days=days_to_activate)
return query.exclude(status__in=[active_value, revoked_value]).filter(
invited_at__gte=lowest_datetime)
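# Sketch of the filter above (settings value invented for illustration): with
# INVITATION_LINK_VALIDITY_DAYS = 10, a PreregistrationUser invited 5 days ago
# with status 0 survives the filter; one invited 11 days ago, or one whose
# status is STATUS_ACTIVE or STATUS_REVOKED, is excluded.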
class MultiuseInvite(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
referred_by: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
streams: Manager = models.ManyToManyField('Stream')
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
invited_as: int = models.PositiveSmallIntegerField(default=PreregistrationUser.INVITE_AS['MEMBER'])
class EmailChangeStatus(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
new_email: str = models.EmailField()
old_email: str = models.EmailField()
updated_at: datetime.datetime = models.DateTimeField(auto_now=True)
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
# status: whether an object has been confirmed.
# If confirmed, this is set to confirmation.settings.STATUS_ACTIVE.
status: int = models.IntegerField(default=0)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
class AbstractPushDeviceToken(models.Model):
APNS = 1
GCM = 2
KINDS = (
(APNS, 'apns'),
(GCM, 'gcm'),
)
kind: int = models.PositiveSmallIntegerField(choices=KINDS)
# The token is a unique device-specific token that is
# sent to us from each device:
# - APNS token if kind == APNS
# - GCM registration id if kind == GCM
token: str = models.CharField(max_length=4096, db_index=True)
# TODO: last_updated should be renamed date_created, since it is
# no longer maintained as a last_updated value.
last_updated: datetime.datetime = models.DateTimeField(auto_now=True)
# [optional] Contains the app id of the device if it is an iOS device
ios_app_id: Optional[str] = models.TextField(null=True)
class Meta:
abstract = True
class PushDeviceToken(AbstractPushDeviceToken):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
# The user whose device this is
user: UserProfile = models.ForeignKey(UserProfile, db_index=True, on_delete=CASCADE)
class Meta:
unique_together = ("user", "kind", "token")
def generate_email_token_for_stream() -> str:
return secrets.token_hex(16)
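# Sketch: secrets.token_hex(16) yields 16 random bytes as 32 hex characters,
# i.e. something shaped like '9f86d081884c7d659a2feaa0c55ad015', which is why
# Stream.email_token below uses max_length=32.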
class Stream(models.Model):
MAX_NAME_LENGTH = 60
MAX_DESCRIPTION_LENGTH = 1024
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
name: str = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True)
realm: Realm = models.ForeignKey(Realm, db_index=True, on_delete=CASCADE)
date_created: datetime.datetime = models.DateTimeField(default=timezone_now)
deactivated: bool = models.BooleanField(default=False)
description: str = models.CharField(max_length=MAX_DESCRIPTION_LENGTH, default='')
rendered_description: str = models.TextField(default='')
# Foreign key to the Recipient object for STREAM type messages to this stream.
recipient = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)
invite_only: Optional[bool] = models.BooleanField(null=True, default=False)
history_public_to_subscribers: bool = models.BooleanField(default=False)
# Whether this stream's content should be published by the web-public archive features
is_web_public: bool = models.BooleanField(default=False)
STREAM_POST_POLICY_EVERYONE = 1
STREAM_POST_POLICY_ADMINS = 2
STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS = 3
# TODO: Implement policy to restrict posting to a user group or admins.
# Who in the organization has permission to send messages to this stream.
stream_post_policy: int = models.PositiveSmallIntegerField(default=STREAM_POST_POLICY_EVERYONE)
STREAM_POST_POLICY_TYPES = [
STREAM_POST_POLICY_EVERYONE,
STREAM_POST_POLICY_ADMINS,
STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS,
]
# The unique thing about Zephyr public streams is that we never list their
# users. We may try to generalize this concept later, but for now
# we just use a concrete field. (Zephyr public streams aren't exactly like
# invite-only streams--while both are private in terms of listing users,
# for Zephyr we don't even list users to stream members, yet membership
# is more public in the sense that you don't need a Zulip invite to join.)
# This field is populated directly from Realm.is_zephyr_mirror_realm,
# and the reason for denormalizing this field is performance.
is_in_zephyr_realm: bool = models.BooleanField(default=False)
# Used by the e-mail forwarder. The e-mail RFC specifies a maximum
# e-mail length of 254, and our max stream name length is 60, so we
# have plenty of room for the token.
email_token: str = models.CharField(
max_length=32, default=generate_email_token_for_stream, unique=True,
)
# For old messages being automatically deleted.
# Value NULL means "use retention policy of the realm".
# Value -1 means "disable retention policy for this stream unconditionally".
# Non-negative values have the natural meaning of "archive messages older than <value> days".
MESSAGE_RETENTION_SPECIAL_VALUES_MAP = {
'forever': -1,
'realm_default': None,
}
message_retention_days: Optional[int] = models.IntegerField(null=True, default=None)
# The very first message ID in the stream. Used to help clients
# determine whether they might need to display "more topics" for a
# stream based on what messages they have cached.
first_message_id: Optional[int] = models.IntegerField(null=True, db_index=True)
def __str__(self) -> str:
return f"<Stream: {self.name}>"
def is_public(self) -> bool:
# All streams are private in Zephyr mirroring realms.
return not self.invite_only and not self.is_in_zephyr_realm
def is_history_realm_public(self) -> bool:
return self.is_public()
def is_history_public_to_subscribers(self) -> bool:
return self.history_public_to_subscribers
# Stream fields included whenever a Stream object is provided to
# Zulip clients via the API. A few details worth noting:
# * "id" is represented as "stream_id" in most API interfaces.
# * "email_token" is not realm-public and thus is not included here.
# * is_in_zephyr_realm is a backend-only optimization.
# * "deactivated" streams are filtered from the API entirely.
# * "realm" and "recipient" are not exposed to clients via the API.
API_FIELDS = [
"name",
"id",
"description",
"rendered_description",
"invite_only",
"is_web_public",
"stream_post_policy",
"history_public_to_subscribers",
"first_message_id",
"message_retention_days",
"date_created",
]
@staticmethod
def get_client_data(query: QuerySet) -> List[Dict[str, Any]]:
query = query.only(*Stream.API_FIELDS)
return [row.to_dict() for row in query]
def to_dict(self) -> Dict[str, Any]:
result = {}
for field_name in self.API_FIELDS:
if field_name == "id":
result['stream_id'] = self.id
continue
elif field_name == "date_created":
result['date_created'] = datetime_to_timestamp(self.date_created)
continue
result[field_name] = getattr(self, field_name)
result['is_announcement_only'] = self.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS
return result
post_save.connect(flush_stream, sender=Stream)
post_delete.connect(flush_stream, sender=Stream)
class MutedTopic(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
stream: Stream = models.ForeignKey(Stream, on_delete=CASCADE)
recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
topic_name: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH)
# The default value for date_muted is a few weeks before tracking
# of when topics were muted was first introduced. It's designed
# to be obviously incorrect so that users can tell it's backfilled data.
date_muted: datetime.datetime = models.DateTimeField(default=datetime.datetime(2020, 1, 1, 0, 0, tzinfo=datetime.timezone.utc))
class Meta:
unique_together = ('user_profile', 'stream', 'topic_name')
def __str__(self) -> str:
return (f"<MutedTopic: ({self.user_profile.email}, {self.stream.name}, {self.topic_name}, {self.date_muted})>")
class Client(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
name: str = models.CharField(max_length=30, db_index=True, unique=True)
def __str__(self) -> str:
return f"<Client: {self.name}>"
get_client_cache: Dict[str, Client] = {}
def get_client(name: str) -> Client:
# Accessing KEY_PREFIX through the module is necessary
# because we need the updated value of the variable.
cache_name = cache.KEY_PREFIX + name
if cache_name not in get_client_cache:
result = get_client_remote_cache(name)
get_client_cache[cache_name] = result
return get_client_cache[cache_name]
def get_client_cache_key(name: str) -> str:
return f'get_client:{make_safe_digest(name)}'
@cache_with_key(get_client_cache_key, timeout=3600*24*7)
def get_client_remote_cache(name: str) -> Client:
(client, _) = Client.objects.get_or_create(name=name)
return client
@cache_with_key(get_stream_cache_key, timeout=3600*24*7)
def get_realm_stream(stream_name: str, realm_id: int) -> Stream:
return Stream.objects.select_related().get(
name__iexact=stream_name.strip(), realm_id=realm_id)
def stream_name_in_use(stream_name: str, realm_id: int) -> bool:
return Stream.objects.filter(
name__iexact=stream_name.strip(),
realm_id=realm_id,
).exists()
def get_active_streams(realm: Optional[Realm]) -> QuerySet:
# TODO: Change return type to QuerySet[Stream]
# NOTE: Return value is used as a QuerySet, so cannot currently be Sequence[QuerySet]
"""
Return all streams (including invite-only streams) that have not been deactivated.
"""
return Stream.objects.filter(realm=realm, deactivated=False)
def get_stream(stream_name: str, realm: Realm) -> Stream:
'''
Callers that don't have a Realm object already available should use
get_realm_stream directly, to avoid unnecessarily fetching the
Realm object.
'''
return get_realm_stream(stream_name, realm.id)
def get_stream_by_id_in_realm(stream_id: int, realm: Realm) -> Stream:
return Stream.objects.select_related().get(id=stream_id, realm=realm)
def bulk_get_streams(realm: Realm, stream_names: STREAM_NAMES) -> Dict[str, Any]:
def fetch_streams_by_name(stream_names: List[str]) -> Sequence[Stream]:
#
# This should be just
#
# Stream.objects.select_related().filter(name__iexact__in=stream_names,
# realm_id=realm_id)
#
# But chaining __in and __iexact doesn't work with Django's
# ORM, so we have the following hack to construct the relevant where clause
where_clause = "upper(zerver_stream.name::text) IN (SELECT upper(name) FROM unnest(%s) AS name)"
return get_active_streams(realm).select_related().extra(
where=[where_clause],
params=(list(stream_names),))
def stream_name_to_cache_key(stream_name: str) -> str:
return get_stream_cache_key(stream_name, realm.id)
def stream_to_lower_name(stream: Stream) -> str:
return stream.name.lower()
return bulk_cached_fetch(
stream_name_to_cache_key,
fetch_streams_by_name,
[stream_name.lower() for stream_name in stream_names],
id_fetcher=stream_to_lower_name,
)
def get_huddle_recipient(user_profile_ids: Set[int]) -> Recipient:
# The caller should ensure that user_profile_ids includes
# the sender. Note that get_huddle hits the cache, and then
# we hit another cache to get the recipient. We may want to
# unify our caching strategy here.
huddle = get_huddle(list(user_profile_ids))
return huddle.recipient
def get_huddle_user_ids(recipient: Recipient) -> List[int]:
assert recipient.type == Recipient.HUDDLE
return Subscription.objects.filter(
recipient=recipient,
).order_by('user_profile_id').values_list('user_profile_id', flat=True)
def bulk_get_huddle_user_ids(recipients: List[Recipient]) -> Dict[int, List[int]]:
"""
Takes a list of huddle-type recipients, returns a dict
mapping recipient id to list of user ids in the huddle.
"""
assert all(recipient.type == Recipient.HUDDLE for recipient in recipients)
if not recipients:
return {}
subscriptions = Subscription.objects.filter(
recipient__in=recipients,
).order_by('user_profile_id')
result_dict: Dict[int, List[int]] = {}
for recipient in recipients:
result_dict[recipient.id] = [subscription.user_profile_id
for subscription in subscriptions
if subscription.recipient_id == recipient.id]
return result_dict
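# Hypothetical shape of the result (ids invented for illustration): given
# huddle recipients with ids 5 and 9, the return value looks like
# {5: [1, 2, 3], 9: [2, 4]}, each list ordered by user_profile_id.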
class AbstractMessage(models.Model):
sender: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
# The message's topic.
#
# Early versions of Zulip called this concept a "subject", as in an email
# "subject line", before changing to "topic" in 2013 (commit dac5a46fa).
# UI and user documentation now consistently say "topic". New APIs and
# new code should generally also say "topic".
#
# See also the `topic_name` method on `Message`.
subject: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH, db_index=True)
content: str = models.TextField()
rendered_content: Optional[str] = models.TextField(null=True)
rendered_content_version: Optional[int] = models.IntegerField(null=True)
date_sent: datetime.datetime = models.DateTimeField('date sent', db_index=True)
sending_client: Client = models.ForeignKey(Client, on_delete=CASCADE)
last_edit_time: Optional[datetime.datetime] = models.DateTimeField(null=True)
# A JSON-encoded list of objects describing any past edits to this
# message, oldest first.
edit_history: Optional[str] = models.TextField(null=True)
has_attachment: bool = models.BooleanField(default=False, db_index=True)
has_image: bool = models.BooleanField(default=False, db_index=True)
has_link: bool = models.BooleanField(default=False, db_index=True)
class Meta:
abstract = True
def __str__(self) -> str:
display_recipient = get_display_recipient(self.recipient)
return f"<{self.__class__.__name__}: {display_recipient} / {self.subject} / {self.sender}>"
class ArchiveTransaction(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
timestamp: datetime.datetime = models.DateTimeField(default=timezone_now, db_index=True)
# Marks if the data archived in this transaction has been restored:
restored: bool = models.BooleanField(default=False, db_index=True)
type: int = models.PositiveSmallIntegerField(db_index=True)
# Valid types:
RETENTION_POLICY_BASED = 1 # Archiving was executed due to automated retention policies
MANUAL = 2 # Archiving was run manually, via move_messages_to_archive function
# ForeignKey to the realm with which objects archived in this transaction are associated.
# If type is set to MANUAL, this should be null.
realm: Optional[Realm] = models.ForeignKey(Realm, null=True, on_delete=CASCADE)
def __str__(self) -> str:
return "ArchiveTransaction id: {id}, type: {type}, realm: {realm}, timestamp: {timestamp}".format(
id=self.id,
type="MANUAL" if self.type == self.MANUAL else "RETENTION_POLICY_BASED",
realm=self.realm.string_id if self.realm else None,
timestamp=self.timestamp,
)
class ArchivedMessage(AbstractMessage):
"""Used as a temporary holding place for deleted messages before they
are permanently deleted. This is an important part of a robust
'message retention' feature.
"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
archive_transaction: ArchiveTransaction = models.ForeignKey(ArchiveTransaction, on_delete=CASCADE)
class Message(AbstractMessage):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
def topic_name(self) -> str:
"""
Please start using this helper to facilitate an
eventual switch over to a separate topic table.
"""
return self.subject
def set_topic_name(self, topic_name: str) -> None:
self.subject = topic_name
def is_stream_message(self) -> bool:
'''
Find out whether a message is a stream message by
looking up its recipient.type. TODO: Make this
an easier operation by denormalizing the message
type onto Message, either explicitly (message.type)
or implicitly (message.stream_id is not None).
'''
return self.recipient.type == Recipient.STREAM
def get_realm(self) -> Realm:
return self.sender.realm
def save_rendered_content(self) -> None:
self.save(update_fields=["rendered_content", "rendered_content_version"])
@staticmethod
def need_to_render_content(rendered_content: Optional[str],
rendered_content_version: Optional[int],
markdown_version: int) -> bool:
return (rendered_content is None or
rendered_content_version is None or
rendered_content_version < markdown_version)
def sent_by_human(self) -> bool:
"""Used to determine whether a message was sent by a full Zulip UI
style client (and thus whether the message should be treated
as sent by a human and automatically marked as read for the
sender). The purpose of this distinction is to ensure that
messages sent to the user by e.g. a Google Calendar integration
using the user's own API key don't get marked as read
automatically.
"""
sending_client = self.sending_client.name.lower()
return (sending_client in ('zulipandroid', 'zulipios', 'zulipdesktop',
'zulipmobile', 'zulipelectron', 'zulipterminal', 'snipe',
'website', 'ios', 'android')) or (
'desktop app' in sending_client)
@staticmethod
def is_status_message(content: str, rendered_content: str) -> bool:
"""
"status messages" start with /me and have special rendering:
/me loves chocolate -> Full Name loves chocolate
"""
if content.startswith('/me '):
return True
return False
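# Doctest-style sketch of the /me check (the rendered_content argument is
# accepted but not consulted):
#
#     >>> Message.is_status_message('/me loves chocolate', '<p>loves chocolate</p>')
#     True
#     >>> Message.is_status_message('I love chocolate', '<p>I love chocolate</p>')
#     False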
def get_context_for_message(message: Message) -> Sequence[Message]:
# TODO: Change return type to QuerySet[Message]
return Message.objects.filter(
recipient_id=message.recipient_id,
subject=message.subject,
id__lt=message.id,
date_sent__gt=message.date_sent - timedelta(minutes=15),
).order_by('-id')[:10]
post_save.connect(flush_message, sender=Message)
class AbstractSubMessage(models.Model):
# We can send little text messages that are associated with a regular
# Zulip message. These can be used for experimental widgets like embedded
# games, surveys, mini threads, etc. These are designed to be pretty
# generic in purpose.
sender: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
msg_type: str = models.TextField()
content: str = models.TextField()
class Meta:
abstract = True
class SubMessage(AbstractSubMessage):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
message: Message = models.ForeignKey(Message, on_delete=CASCADE)
@staticmethod
def get_raw_db_rows(needed_ids: List[int]) -> List[Dict[str, Any]]:
fields = ['id', 'message_id', 'sender_id', 'msg_type', 'content']
query = SubMessage.objects.filter(message_id__in=needed_ids).values(*fields)
query = query.order_by('message_id', 'id')
return list(query)
class ArchivedSubMessage(AbstractSubMessage):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
message: ArchivedMessage = models.ForeignKey(ArchivedMessage, on_delete=CASCADE)
post_save.connect(flush_submessage, sender=SubMessage)
class Draft(models.Model):
""" Server-side storage model for storing drafts so that drafts can be synced across
multiple clients/devices.
"""
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
recipient: Optional[Recipient] = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)
topic: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH, db_index=True)
content: str = models.TextField() # Length should not exceed MAX_MESSAGE_LENGTH
last_edit_time: datetime.datetime = models.DateTimeField(db_index=True)
def __str__(self) -> str:
return f"<{self.__class__.__name__}: {self.user_profile.email} / {self.id} / {self.last_edit_time}>"
def to_dict(self) -> Dict[str, Any]:
if self.recipient is None:
_type = ""
to = []
elif self.recipient.type == Recipient.STREAM:
_type = "stream"
to = [self.recipient.type_id]
else:
_type = "private"
if self.recipient.type == Recipient.PERSONAL:
to = [self.recipient.type_id]
else:
to = []
for r in get_display_recipient(self.recipient):
assert not isinstance(r, str)  # It will only be a string for streams
if r["id"] != self.user_profile_id:
to.append(r["id"])
return {
"id": self.id,
"type": _type,
"to": to,
"topic": self.topic,
"content": self.content,
"timestamp": int(self.last_edit_time.timestamp()),
}
class AbstractReaction(models.Model):
"""For emoji reactions to messages (and potentially future reaction types).
Emoji are surprisingly complicated to implement correctly. For details
on how this subsystem works, see:
https://zulip.readthedocs.io/en/latest/subsystems/emoji.html
"""
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
# The user-facing name for an emoji reaction. With emoji aliases,
# there may be multiple accepted names for a given emoji; this
# field encodes which one the user selected.
emoji_name: str = models.TextField()
UNICODE_EMOJI = 'unicode_emoji'
REALM_EMOJI = 'realm_emoji'
ZULIP_EXTRA_EMOJI = 'zulip_extra_emoji'
REACTION_TYPES = ((UNICODE_EMOJI, ugettext_lazy("Unicode emoji")),
(REALM_EMOJI, ugettext_lazy("Custom emoji")),
(ZULIP_EXTRA_EMOJI, ugettext_lazy("Zulip extra emoji")))
reaction_type: str = models.CharField(default=UNICODE_EMOJI, choices=REACTION_TYPES, max_length=30)
# A string that uniquely identifies a particular emoji. The format varies
# by type:
#
# * For Unicode emoji, a dash-separated hex encoding of the sequence of
# Unicode codepoints that define this emoji in the Unicode
# specification. For examples, see "non_qualified" or "unified" in the
# following data, with "non_qualified" taking precedence when both present:
# https://raw.githubusercontent.com/iamcal/emoji-data/master/emoji_pretty.json
#
# * For realm emoji (aka user uploaded custom emoji), the ID
# (in ASCII decimal) of the RealmEmoji object.
#
# * For "Zulip extra emoji" (like :zulip:), the filename of the emoji.
emoji_code: str = models.TextField()
class Meta:
abstract = True
unique_together = (("user_profile", "message", "emoji_name"),
("user_profile", "message", "reaction_type", "emoji_code"))
class Reaction(AbstractReaction):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
message: Message = models.ForeignKey(Message, on_delete=CASCADE)
@staticmethod
def get_raw_db_rows(needed_ids: List[int]) -> List[Dict[str, Any]]:
fields = ['message_id', 'emoji_name', 'emoji_code', 'reaction_type',
'user_profile__email', 'user_profile__id', 'user_profile__full_name']
return Reaction.objects.filter(message_id__in=needed_ids).values(*fields)
def __str__(self) -> str:
return f"{self.user_profile.email} / {self.message.id} / {self.emoji_name}"
class ArchivedReaction(AbstractReaction):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
message: ArchivedMessage = models.ForeignKey(ArchivedMessage, on_delete=CASCADE)
# Whenever a message is sent, for each user subscribed to the
# corresponding Recipient object, we add a row to the UserMessage
# table indicating that that user received that message. This table
# allows us to quickly query any user's last 1000 messages to generate
# the home view.
#
# Additionally, the flags field stores metadata like whether the user
# has read the message, starred or collapsed the message, was
# mentioned in the message, etc.
#
# UserMessage is the largest table in a Zulip installation, even
# though each row is only 4 integers.
class AbstractUserMessage(models.Model):
id: int = models.BigAutoField(primary_key=True)
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
# The order here is important! It's the order of fields in the bitfield.
ALL_FLAGS = [
'read',
'starred',
'collapsed',
'mentioned',
'wildcard_mentioned',
# These next 4 flags are from features that have since been removed.
'summarize_in_home',
'summarize_in_stream',
'force_expand',
'force_collapse',
# Whether the message contains any of the user's alert words.
'has_alert_word',
# The historical flag is used to mark messages which the user
# did not receive when they were sent, but later added to
# their history via e.g. starring the message. This is
# important for the accounting behind the "Subscribed to stream" dividers.
'historical',
# Whether the message is a private message; this flag is a
# denormalization of message.recipient.type to support an
# efficient index on UserMessage for a user's private messages.
'is_private',
# Whether we've sent a push notification to the user's mobile
# devices for this message that has not been revoked.
'active_mobile_push_notification',
]
# Certain flags are used only for internal accounting within the
# Zulip backend, and don't make sense to expose to the API.
NON_API_FLAGS = {"is_private", "active_mobile_push_notification"}
# Certain additional flags are just set once when the UserMessage
# row is created.
NON_EDITABLE_FLAGS = {
# These flags are bookkeeping and don't make sense to edit.
"has_alert_word",
"mentioned",
"wildcard_mentioned",
"historical",
# Unused flags can't be edited.
"force_expand",
"force_collapse",
"summarize_in_home",
"summarize_in_stream",
}
flags: BitHandler = BitField(flags=ALL_FLAGS, default=0)
class Meta:
abstract = True
unique_together = ("user_profile", "message")
@staticmethod
def where_unread() -> str:
# Use this for Django ORM queries to access unread messages.
# This custom SQL plays nice with our partial indexes. Grep
# the code for example usage.
return 'flags & 1 = 0'
@staticmethod
def where_starred() -> str:
# Use this for Django ORM queries to access starred messages.
# This custom SQL plays nice with our partial indexes. Grep
# the code for example usage.
#
# The key detail is that e.g.
# UserMessage.objects.filter(user_profile=user_profile, flags=UserMessage.flags.starred)
# will generate a query involving `flags & 2 = 2`, which doesn't match our index.
return 'flags & 2 <> 0'
@staticmethod
def where_active_push_notification() -> str:
# See where_starred for documentation.
return 'flags & 4096 <> 0'
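# How the magic numbers above fall out of ALL_FLAGS (bit i has value 2**i):
# 'read' is bit 0 -> 1, 'starred' is bit 1 -> 2, and
# 'active_mobile_push_notification' is bit 12 -> 4096.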
def flags_list(self) -> List[str]:
flags = int(self.flags)
return self.flags_list_for_flags(flags)
@staticmethod
def flags_list_for_flags(val: int) -> List[str]:
'''
This function is highly optimized, because it actually slows down
sending messages in a naive implementation.
'''
flags = []
mask = 1
for flag in UserMessage.ALL_FLAGS:
if (val & mask) and flag not in AbstractUserMessage.NON_API_FLAGS:
flags.append(flag)
mask <<= 1
return flags
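# Doctest-style sketch: bits 0 and 1 are 'read' and 'starred', while bit 11
# ('is_private', value 2048) is filtered out as a NON_API_FLAG:
#
#     >>> AbstractUserMessage.flags_list_for_flags(1 | 2)
#     ['read', 'starred']
#     >>> AbstractUserMessage.flags_list_for_flags(2048)
#     []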
def __str__(self) -> str:
display_recipient = get_display_recipient(self.message.recipient)
return f"<{self.__class__.__name__}: {display_recipient} / {self.user_profile.email} ({self.flags_list()})>"
class UserMessage(AbstractUserMessage):
message: Message = models.ForeignKey(Message, on_delete=CASCADE)
def get_usermessage_by_message_id(user_profile: UserProfile, message_id: int) -> Optional[UserMessage]:
try:
return UserMessage.objects.select_related().get(user_profile=user_profile,
message__id=message_id)
except UserMessage.DoesNotExist:
return None
class ArchivedUserMessage(AbstractUserMessage):
"""Used as a temporary holding place for deleted UserMessages objects
before they are permanently deleted. This is an important part of
a robust 'message retention' feature.
"""
message: Message = models.ForeignKey(ArchivedMessage, on_delete=CASCADE)
class AbstractAttachment(models.Model):
file_name: str = models.TextField(db_index=True)
# path_id is a storage location agnostic representation of the path of the file.
# If the path of a file is http://localhost:9991/user_uploads/a/b/abc/temp_file.py
# then its path_id will be a/b/abc/temp_file.py.
path_id: str = models.TextField(db_index=True, unique=True)
owner: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
realm: Optional[Realm] = models.ForeignKey(Realm, blank=True, null=True, on_delete=CASCADE)
create_time: datetime.datetime = models.DateTimeField(
default=timezone_now, db_index=True,
)
# Size of the uploaded file, in bytes
size: int = models.IntegerField()
    # The two fields below let us avoid looking up the corresponding
# messages/streams to check permissions before serving these files.
# Whether this attachment has been posted to a public stream, and
# thus should be available to all non-guest users in the
# organization (even if they weren't a recipient of a message
# linking to it).
is_realm_public: bool = models.BooleanField(default=False)
# Whether this attachment has been posted to a web-public stream,
# and thus should be available to everyone on the internet, even
# if the person isn't logged in.
is_web_public: bool = models.BooleanField(default=False)
class Meta:
abstract = True
def __str__(self) -> str:
return f"<{self.__class__.__name__}: {self.file_name}>"
class ArchivedAttachment(AbstractAttachment):
"""Used as a temporary holding place for deleted Attachment objects
before they are permanently deleted. This is an important part of
a robust 'message retention' feature.
"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
messages: Manager = models.ManyToManyField(ArchivedMessage)
class Attachment(AbstractAttachment):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
messages: Manager = models.ManyToManyField(Message)
def is_claimed(self) -> bool:
return self.messages.count() > 0
def to_dict(self) -> Dict[str, Any]:
return {
'id': self.id,
'name': self.file_name,
'path_id': self.path_id,
'size': self.size,
# convert to JavaScript-style UNIX timestamp so we can take
# advantage of client timezones.
'create_time': int(time.mktime(self.create_time.timetuple()) * 1000),
'messages': [{
'id': m.id,
'date_sent': int(time.mktime(m.date_sent.timetuple()) * 1000),
} for m in self.messages.all()],
}
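# Editor's note on the timestamp math above (added): time.mktime() yields
# seconds since the epoch from a local struct_time, so multiplying by 1000
# produces the millisecond timestamps JavaScript Date expects.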
post_save.connect(flush_used_upload_space_cache, sender=Attachment)
post_delete.connect(flush_used_upload_space_cache, sender=Attachment)
def validate_attachment_request(user_profile: UserProfile, path_id: str) -> Optional[bool]:
try:
attachment = Attachment.objects.get(path_id=path_id)
except Attachment.DoesNotExist:
return None
if user_profile == attachment.owner:
# If you own the file, you can access it.
return True
if (attachment.is_realm_public and attachment.realm == user_profile.realm and
user_profile.can_access_public_streams()):
# Any user in the realm can access realm-public files
return True
messages = attachment.messages.all()
if UserMessage.objects.filter(user_profile=user_profile, message__in=messages).exists():
# If it was sent in a private message or private stream
# message, then anyone who received that message can access it.
return True
# The user didn't receive any of the messages that included this
# attachment. But they might still have access to it, if it was
# sent to a stream they are on where history is public to
# subscribers.
# These are subscriptions to a stream one of the messages was sent to
relevant_stream_ids = Subscription.objects.filter(
user_profile=user_profile,
active=True,
recipient__type=Recipient.STREAM,
recipient__in=[m.recipient_id for m in messages]).values_list("recipient__type_id", flat=True)
if len(relevant_stream_ids) == 0:
return False
return Stream.objects.filter(id__in=relevant_stream_ids,
history_public_to_subscribers=True).exists()
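# A hedged usage sketch (helper names hypothetical): a file-serving endpoint
# would typically gate downloads on the tri-state result above, treating
# None ("no such attachment") and False ("exists but forbidden") differently
# from True, e.g.:
#
#     result = validate_attachment_request(user_profile, path_id)
#     if result is None:
#         raise_404()          # hypothetical helper
#     elif not result:
#         raise_403()          # hypothetical helper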
def get_old_unclaimed_attachments(weeks_ago: int) -> Sequence[Attachment]:
# TODO: Change return type to QuerySet[Attachment]
delta_weeks_ago = timezone_now() - datetime.timedelta(weeks=weeks_ago)
old_attachments = Attachment.objects.filter(messages=None, create_time__lt=delta_weeks_ago)
return old_attachments
class Subscription(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
# Whether the user has since unsubscribed. We mark Subscription
# objects as inactive, rather than deleting them, when a user
# unsubscribes, so we can preserve user customizations like
# notification settings, stream color, etc., if the user later
# resubscribes.
active: bool = models.BooleanField(default=True)
ROLE_STREAM_ADMINISTRATOR = 20
ROLE_MEMBER = 50
ROLE_TYPES = [
ROLE_STREAM_ADMINISTRATOR,
ROLE_MEMBER,
]
role: int = models.PositiveSmallIntegerField(default=ROLE_MEMBER, db_index=True)
    # Whether the user has muted this stream.
is_muted: Optional[bool] = models.BooleanField(null=True, default=False)
DEFAULT_STREAM_COLOR = "#c2c2c2"
color: str = models.CharField(max_length=10, default=DEFAULT_STREAM_COLOR)
pin_to_top: bool = models.BooleanField(default=False)
# These fields are stream-level overrides for the user's default
# configuration for notification, configured in UserProfile. The
# default, None, means we just inherit the user-level default.
desktop_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
audible_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
push_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
email_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
wildcard_mentions_notify: Optional[bool] = models.BooleanField(null=True, default=None)
class Meta:
unique_together = ("user_profile", "recipient")
def __str__(self) -> str:
return f"<Subscription: {self.user_profile} -> {self.recipient}>"
@property
def is_stream_admin(self) -> bool:
return self.role == Subscription.ROLE_STREAM_ADMINISTRATOR
# Subscription fields included whenever a Subscription object is provided to
# Zulip clients via the API. A few details worth noting:
# * These fields will generally be merged with Stream.API_FIELDS
# data about the stream.
# * "user_profile" is usually implied as full API access to Subscription
# is primarily done for the current user; API access to other users'
# subscriptions is generally limited to boolean yes/no.
# * "id" and "recipient_id" are not included as they are not used
# in the Zulip API; it's an internal implementation detail.
# Subscription objects are always looked up in the API via
# (user_profile, stream) pairs.
# * "active" is often excluded in API use cases where it is implied.
# * "is_muted" often needs to be copied to not "in_home_view" for
# backwards-compatibility.
API_FIELDS = [
"color",
"is_muted",
"pin_to_top",
"audible_notifications",
"desktop_notifications",
"email_notifications",
"push_notifications",
"wildcard_mentions_notify",
"role",
]
@cache_with_key(user_profile_by_id_cache_key, timeout=3600*24*7)
def get_user_profile_by_id(uid: int) -> UserProfile:
return UserProfile.objects.select_related().get(id=uid)
@cache_with_key(user_profile_by_email_cache_key, timeout=3600*24*7)
def get_user_profile_by_email(email: str) -> UserProfile:
"""This function is intended to be used by our unit tests and for
manual manage.py shell work; robust code must use get_user or
get_user_by_delivery_email instead, because Zulip supports
multiple users with a given (delivery) email address existing on a
single server (in different realms).
"""
return UserProfile.objects.select_related().get(delivery_email__iexact=email.strip())
@cache_with_key(user_profile_by_api_key_cache_key, timeout=3600*24*7)
def maybe_get_user_profile_by_api_key(api_key: str) -> Optional[UserProfile]:
try:
return UserProfile.objects.select_related().get(api_key=api_key)
except UserProfile.DoesNotExist:
# We will cache failed lookups with None. The
# use case here is that broken API clients may
# continually ask for the same wrong API key, and
# we want to handle that as quickly as possible.
return None
def get_user_profile_by_api_key(api_key: str) -> UserProfile:
user_profile = maybe_get_user_profile_by_api_key(api_key)
if user_profile is None:
raise UserProfile.DoesNotExist()
return user_profile
def get_user_by_delivery_email(email: str, realm: Realm) -> UserProfile:
"""Fetches a user given their delivery email. For use in
authentication/registration contexts. Do not use for user-facing
views (e.g. Zulip API endpoints) as doing so would violate the
EMAIL_ADDRESS_VISIBILITY_ADMINS security model. Use get_user in
those code paths.
"""
return UserProfile.objects.select_related().get(
delivery_email__iexact=email.strip(), realm=realm)
def get_users_by_delivery_email(emails: Set[str], realm: Realm) -> QuerySet:
"""This is similar to get_user_by_delivery_email, and
it has the same security caveats. It gets multiple
users and returns a QuerySet, since most callers
will only need two or three fields.
If you are using this to get large UserProfile objects, you are
probably making a mistake, but if you must,
then use `select_related`.
"""
    # Django doesn't support delivery_email__iexact__in, so
    # we simply OR all the filters that we'd do for the
    # one-email case.
email_filter = Q()
for email in emails:
email_filter |= Q(delivery_email__iexact=email.strip())
return UserProfile.objects.filter(realm=realm).filter(email_filter)
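# Editor's note (an assumption about the generated SQL): the ORed Q objects
# above compile to a chain of case-insensitive equality comparisons, roughly
# UPPER(delivery_email) = UPPER('a@x') OR UPPER(delivery_email) = UPPER('b@x'),
# which is the closest PostgreSQL equivalent of the unsupported
# delivery_email__iexact__in lookup.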
@cache_with_key(user_profile_cache_key, timeout=3600*24*7)
def get_user(email: str, realm: Realm) -> UserProfile:
"""Fetches the user by its visible-to-other users username (in the
`email` field). For use in API contexts; do not use in
authentication/registration contexts as doing so will break
authentication in organizations using
EMAIL_ADDRESS_VISIBILITY_ADMINS. In those code paths, use
get_user_by_delivery_email.
"""
return UserProfile.objects.select_related().get(email__iexact=email.strip(), realm=realm)
def get_active_user(email: str, realm: Realm) -> UserProfile:
"""Variant of get_user_by_email that excludes deactivated users.
See get_user docstring for important usage notes."""
user_profile = get_user(email, realm)
if not user_profile.is_active:
raise UserProfile.DoesNotExist()
return user_profile
def get_user_profile_by_id_in_realm(uid: int, realm: Realm) -> UserProfile:
return UserProfile.objects.select_related().get(id=uid, realm=realm)
def get_active_user_profile_by_id_in_realm(uid: int, realm: Realm) -> UserProfile:
user_profile = get_user_profile_by_id_in_realm(uid, realm)
if not user_profile.is_active:
raise UserProfile.DoesNotExist()
return user_profile
def get_user_including_cross_realm(email: str, realm: Optional[Realm]=None) -> UserProfile:
if is_cross_realm_bot_email(email):
return get_system_bot(email)
assert realm is not None
return get_user(email, realm)
@cache_with_key(bot_profile_cache_key, timeout=3600*24*7)
def get_system_bot(email: str) -> UserProfile:
return UserProfile.objects.select_related().get(email__iexact=email.strip())
def get_user_by_id_in_realm_including_cross_realm(
uid: int,
realm: Optional[Realm],
) -> UserProfile:
user_profile = get_user_profile_by_id(uid)
if user_profile.realm == realm:
return user_profile
# Note: This doesn't validate whether the `realm` passed in is
# None/invalid for the CROSS_REALM_BOT_EMAILS case.
if user_profile.delivery_email in settings.CROSS_REALM_BOT_EMAILS:
return user_profile
raise UserProfile.DoesNotExist()
@cache_with_key(realm_user_dicts_cache_key, timeout=3600*24*7)
def get_realm_user_dicts(realm_id: int) -> List[Dict[str, Any]]:
return UserProfile.objects.filter(
realm_id=realm_id,
).values(*realm_user_dict_fields)
@cache_with_key(active_user_ids_cache_key, timeout=3600*24*7)
def active_user_ids(realm_id: int) -> List[int]:
query = UserProfile.objects.filter(
realm_id=realm_id,
is_active=True,
).values_list('id', flat=True)
return list(query)
@cache_with_key(active_non_guest_user_ids_cache_key, timeout=3600*24*7)
def active_non_guest_user_ids(realm_id: int) -> List[int]:
query = UserProfile.objects.filter(
realm_id=realm_id,
is_active=True,
).exclude(
role=UserProfile.ROLE_GUEST,
).values_list('id', flat=True)
return list(query)
def get_source_profile(email: str, string_id: str) -> Optional[UserProfile]:
try:
return get_user_by_delivery_email(email, get_realm(string_id))
except (Realm.DoesNotExist, UserProfile.DoesNotExist):
return None
@cache_with_key(bot_dicts_in_realm_cache_key, timeout=3600*24*7)
def get_bot_dicts_in_realm(realm: Realm) -> List[Dict[str, Any]]:
return UserProfile.objects.filter(realm=realm, is_bot=True).values(*bot_dict_fields)
def is_cross_realm_bot_email(email: str) -> bool:
return email.lower() in settings.CROSS_REALM_BOT_EMAILS
# The Huddle class represents a group of individuals who have had a
# group private message conversation together. The actual membership
# of the Huddle is stored in the Subscription table just like with
# Streams, and a hash of that list is stored in the huddle_hash field
# below, to support efficiently mapping from a set of users to the
# corresponding Huddle object.
class Huddle(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
# TODO: We should consider whether using
# CommaSeparatedIntegerField would be better.
huddle_hash: str = models.CharField(max_length=40, db_index=True, unique=True)
# Foreign key to the Recipient object for this Huddle.
recipient = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)
def get_huddle_hash(id_list: List[int]) -> str:
id_list = sorted(set(id_list))
hash_key = ",".join(str(x) for x in id_list)
return make_safe_digest(hash_key)
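# Illustrative example (added): because the IDs are deduplicated and sorted
# before hashing, get_huddle_hash([3, 1, 3]) == get_huddle_hash([1, 3]), so
# the same set of users always maps to the same Huddle row.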
def huddle_hash_cache_key(huddle_hash: str) -> str:
return f"huddle_by_hash:{huddle_hash}"
def get_huddle(id_list: List[int]) -> Huddle:
huddle_hash = get_huddle_hash(id_list)
return get_huddle_backend(huddle_hash, id_list)
@cache_with_key(lambda huddle_hash, id_list: huddle_hash_cache_key(huddle_hash), timeout=3600*24*7)
def get_huddle_backend(huddle_hash: str, id_list: List[int]) -> Huddle:
with transaction.atomic():
(huddle, created) = Huddle.objects.get_or_create(huddle_hash=huddle_hash)
if created:
recipient = Recipient.objects.create(type_id=huddle.id,
type=Recipient.HUDDLE)
huddle.recipient = recipient
huddle.save(update_fields=["recipient"])
subs_to_create = [Subscription(recipient=recipient,
user_profile_id=user_profile_id)
for user_profile_id in id_list]
Subscription.objects.bulk_create(subs_to_create)
return huddle
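# Editor's note (added): the cache key above depends only on huddle_hash;
# id_list is ignored for caching and is needed only on a cache miss, when the
# Huddle, its Recipient, and the member Subscriptions are created atomically.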
class UserActivity(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
client: Client = models.ForeignKey(Client, on_delete=CASCADE)
query: str = models.CharField(max_length=50, db_index=True)
count: int = models.IntegerField()
last_visit: datetime.datetime = models.DateTimeField('last visit')
class Meta:
unique_together = ("user_profile", "client", "query")
class UserActivityInterval(models.Model):
MIN_INTERVAL_LENGTH = datetime.timedelta(minutes=15)
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
start: datetime.datetime = models.DateTimeField('start time', db_index=True)
end: datetime.datetime = models.DateTimeField('end time', db_index=True)
class UserPresence(models.Model):
"""A record from the last time we heard from a given user on a given client.
This is a tricky subsystem, because it is highly optimized. See the docs:
https://zulip.readthedocs.io/en/latest/subsystems/presence.html
"""
class Meta:
unique_together = ("user_profile", "client")
index_together = [
("realm", "timestamp"),
]
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
client: Client = models.ForeignKey(Client, on_delete=CASCADE)
# The time we heard this update from the client.
timestamp: datetime.datetime = models.DateTimeField('presence changed')
# The user was actively using this Zulip client as of `timestamp` (i.e.,
# they had interacted with the client recently). When the timestamp is
# itself recent, this is the green "active" status in the webapp.
ACTIVE = 1
# There had been no user activity (keyboard/mouse/etc.) on this client
# recently. So the client was online at the specified time, but it
# could be the user's desktop which they were away from. Displayed as
# orange/idle if the timestamp is current.
IDLE = 2
# Information from the client about the user's recent interaction with
# that client, as of `timestamp`. Possible values above.
#
# There is no "inactive" status, because that is encoded by the
# timestamp being old.
status: int = models.PositiveSmallIntegerField(default=ACTIVE)
@staticmethod
def status_to_string(status: int) -> str:
if status == UserPresence.ACTIVE:
return 'active'
elif status == UserPresence.IDLE:
return 'idle'
else: # nocoverage # TODO: Add a presence test to cover this.
raise ValueError(f'Unknown status: {status}')
@staticmethod
def to_presence_dict(client_name: str, status: int, dt: datetime.datetime, push_enabled: bool=False,
has_push_devices: bool=False) -> Dict[str, Any]:
presence_val = UserPresence.status_to_string(status)
timestamp = datetime_to_timestamp(dt)
return dict(
client=client_name,
status=presence_val,
timestamp=timestamp,
pushable=(push_enabled and has_push_devices),
)
def to_dict(self) -> Dict[str, Any]:
return UserPresence.to_presence_dict(
self.client.name,
self.status,
self.timestamp,
)
@staticmethod
def status_from_string(status: str) -> Optional[int]:
if status == 'active':
# See https://github.com/python/mypy/issues/2611
status_val: Optional[int] = UserPresence.ACTIVE
elif status == 'idle':
status_val = UserPresence.IDLE
else:
status_val = None
return status_val
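    # Illustrative round-trip (added): status_from_string('idle') returns
    # UserPresence.IDLE, and status_to_string(UserPresence.IDLE) returns
    # 'idle'; unknown strings map to None rather than raising.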
class UserStatus(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_profile: UserProfile = models.OneToOneField(UserProfile, on_delete=CASCADE)
timestamp: datetime.datetime = models.DateTimeField()
client: Client = models.ForeignKey(Client, on_delete=CASCADE)
NORMAL = 0
AWAY = 1
status: int = models.PositiveSmallIntegerField(default=NORMAL)
status_text: str = models.CharField(max_length=255, default='')
class DefaultStream(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
stream: Stream = models.ForeignKey(Stream, on_delete=CASCADE)
class Meta:
unique_together = ("realm", "stream")
class DefaultStreamGroup(models.Model):
MAX_NAME_LENGTH = 60
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
name: str = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
streams: Manager = models.ManyToManyField('Stream')
description: str = models.CharField(max_length=1024, default='')
class Meta:
unique_together = ("realm", "name")
def to_dict(self) -> Dict[str, Any]:
return dict(name=self.name,
id=self.id,
description=self.description,
streams=[stream.to_dict() for stream in self.streams.all()])
def get_default_stream_groups(realm: Realm) -> List[DefaultStreamGroup]:
return DefaultStreamGroup.objects.filter(realm=realm)
class AbstractScheduledJob(models.Model):
scheduled_timestamp: datetime.datetime = models.DateTimeField(db_index=True)
# JSON representation of arguments to consumer
data: str = models.TextField()
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
class Meta:
abstract = True
class ScheduledEmail(AbstractScheduledJob):
# Exactly one of users or address should be set. These are
# duplicate values, used to efficiently filter the set of
# ScheduledEmails for use in clear_scheduled_emails; the
# recipients used for actually sending messages are stored in the
# data field of AbstractScheduledJob.
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
users: Manager = models.ManyToManyField(UserProfile)
# Just the address part of a full "name <address>" email address
address: Optional[str] = models.EmailField(null=True, db_index=True)
# Valid types are below
WELCOME = 1
DIGEST = 2
INVITATION_REMINDER = 3
type: int = models.PositiveSmallIntegerField()
def __str__(self) -> str:
return f"<ScheduledEmail: {self.type} {self.address or list(self.users.all())} {self.scheduled_timestamp}>"
class MissedMessageEmailAddress(models.Model):
EXPIRY_SECONDS = 60 * 60 * 24 * 5
ALLOWED_USES = 1
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
message: Message = models.ForeignKey(Message, on_delete=CASCADE)
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
email_token: str = models.CharField(max_length=34, unique=True, db_index=True)
    # Timestamp of when the missed-message address was generated.
# The address is valid until timestamp + EXPIRY_SECONDS.
timestamp: datetime.datetime = models.DateTimeField(db_index=True, default=timezone_now)
times_used: int = models.PositiveIntegerField(default=0, db_index=True)
def __str__(self) -> str:
return settings.EMAIL_GATEWAY_PATTERN % (self.email_token,)
def is_usable(self) -> bool:
not_expired = timezone_now() <= self.timestamp + timedelta(seconds=self.EXPIRY_SECONDS)
has_uses_left = self.times_used < self.ALLOWED_USES
return has_uses_left and not_expired
def increment_times_used(self) -> None:
self.times_used += 1
self.save(update_fields=["times_used"])
class ScheduledMessage(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
sender: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
subject: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH)
content: str = models.TextField()
sending_client: Client = models.ForeignKey(Client, on_delete=CASCADE)
stream: Optional[Stream] = models.ForeignKey(Stream, null=True, on_delete=CASCADE)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
scheduled_timestamp: datetime.datetime = models.DateTimeField(db_index=True)
delivered: bool = models.BooleanField(default=False)
SEND_LATER = 1
REMIND = 2
DELIVERY_TYPES = (
(SEND_LATER, 'send_later'),
(REMIND, 'remind'),
)
delivery_type: int = models.PositiveSmallIntegerField(
choices=DELIVERY_TYPES, default=SEND_LATER,
)
def topic_name(self) -> str:
return self.subject
def set_topic_name(self, topic_name: str) -> None:
self.subject = topic_name
def __str__(self) -> str:
display_recipient = get_display_recipient(self.recipient)
return f"<ScheduledMessage: {display_recipient} {self.subject} {self.sender} {self.scheduled_timestamp}>"
EMAIL_TYPES = {
'followup_day1': ScheduledEmail.WELCOME,
'followup_day2': ScheduledEmail.WELCOME,
'digest': ScheduledEmail.DIGEST,
'invitation_reminder': ScheduledEmail.INVITATION_REMINDER,
}
class AbstractRealmAuditLog(models.Model):
"""Defines fields common to RealmAuditLog and RemoteRealmAuditLog."""
event_time: datetime.datetime = models.DateTimeField(db_index=True)
# If True, event_time is an overestimate of the true time. Can be used
# by migrations when introducing a new event_type.
backfilled: bool = models.BooleanField(default=False)
# Keys within extra_data, when extra_data is a json dict. Keys are strings because
# json keys must always be strings.
OLD_VALUE = '1'
NEW_VALUE = '2'
ROLE_COUNT = '10'
ROLE_COUNT_HUMANS = '11'
ROLE_COUNT_BOTS = '12'
extra_data: Optional[str] = models.TextField(null=True)
# Event types
USER_CREATED = 101
USER_ACTIVATED = 102
USER_DEACTIVATED = 103
USER_REACTIVATED = 104
USER_ROLE_CHANGED = 105
USER_SOFT_ACTIVATED = 120
USER_SOFT_DEACTIVATED = 121
USER_PASSWORD_CHANGED = 122
USER_AVATAR_SOURCE_CHANGED = 123
USER_FULL_NAME_CHANGED = 124
USER_EMAIL_CHANGED = 125
USER_TOS_VERSION_CHANGED = 126
USER_API_KEY_CHANGED = 127
USER_BOT_OWNER_CHANGED = 128
USER_DEFAULT_SENDING_STREAM_CHANGED = 129
USER_DEFAULT_REGISTER_STREAM_CHANGED = 130
USER_DEFAULT_ALL_PUBLIC_STREAMS_CHANGED = 131
USER_NOTIFICATION_SETTINGS_CHANGED = 132
USER_DIGEST_EMAIL_CREATED = 133
REALM_DEACTIVATED = 201
REALM_REACTIVATED = 202
REALM_SCRUBBED = 203
REALM_PLAN_TYPE_CHANGED = 204
REALM_LOGO_CHANGED = 205
REALM_EXPORTED = 206
REALM_PROPERTY_CHANGED = 207
REALM_ICON_SOURCE_CHANGED = 208
SUBSCRIPTION_CREATED = 301
SUBSCRIPTION_ACTIVATED = 302
SUBSCRIPTION_DEACTIVATED = 303
SUBSCRIPTION_PROPERTY_CHANGED = 304
STRIPE_CUSTOMER_CREATED = 401
STRIPE_CARD_CHANGED = 402
STRIPE_PLAN_CHANGED = 403
STRIPE_PLAN_QUANTITY_RESET = 404
CUSTOMER_CREATED = 501
CUSTOMER_PLAN_CREATED = 502
CUSTOMER_SWITCHED_FROM_MONTHLY_TO_ANNUAL_PLAN = 503
STREAM_CREATED = 601
STREAM_DEACTIVATED = 602
STREAM_NAME_CHANGED = 603
event_type: int = models.PositiveSmallIntegerField()
# event_types synced from on-prem installations to Zulip Cloud when
# billing for mobile push notifications is enabled. Every billing
# event_type should have ROLE_COUNT populated in extra_data.
SYNCED_BILLING_EVENTS = [
USER_CREATED, USER_ACTIVATED, USER_DEACTIVATED, USER_REACTIVATED, USER_ROLE_CHANGED,
REALM_DEACTIVATED, REALM_REACTIVATED]
class Meta:
abstract = True
class RealmAuditLog(AbstractRealmAuditLog):
"""
RealmAuditLog tracks important changes to users, streams, and
    realms in Zulip. It is intended both to support
    debugging/introspection (e.g. determining when a user left a
    given stream) and to help with database migrations where
    we might be able to do a better data backfill with it. Here are a
few key details about how this works:
* acting_user is the user who initiated the state change
* modified_user (if present) is the user being modified
* modified_stream (if present) is the stream being modified
For example:
* When a user subscribes another user to a stream, modified_user,
acting_user, and modified_stream will all be present and different.
* When an administrator changes an organization's realm icon,
acting_user is that administrator and both modified_user and
modified_stream will be None.
"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
acting_user: Optional[UserProfile] = models.ForeignKey(
UserProfile, null=True, related_name="+", on_delete=CASCADE,
)
modified_user: Optional[UserProfile] = models.ForeignKey(
UserProfile, null=True, related_name="+", on_delete=CASCADE,
)
modified_stream: Optional[Stream] = models.ForeignKey(
Stream, null=True, on_delete=CASCADE,
)
event_last_message_id: Optional[int] = models.IntegerField(null=True)
def __str__(self) -> str:
if self.modified_user is not None:
return f"<RealmAuditLog: {self.modified_user} {self.event_type} {self.event_time} {self.id}>"
if self.modified_stream is not None:
return f"<RealmAuditLog: {self.modified_stream} {self.event_type} {self.event_time} {self.id}>"
return f"<RealmAuditLog: {self.realm} {self.event_type} {self.event_time} {self.id}>"
class UserHotspot(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
hotspot: str = models.CharField(max_length=30)
timestamp: datetime.datetime = models.DateTimeField(default=timezone_now)
class Meta:
unique_together = ("user", "hotspot")
def check_valid_user_ids(realm_id: int, val: object,
allow_deactivated: bool=False) -> List[int]:
user_ids = check_list(check_int)("User IDs", val)
realm = Realm.objects.get(id=realm_id)
for user_id in user_ids:
# TODO: Structurally, we should be doing a bulk fetch query to
# get the users here, not doing these in a loop. But because
# this is a rarely used feature and likely to never have more
# than a handful of users, it's probably mostly OK.
try:
user_profile = get_user_profile_by_id_in_realm(user_id, realm)
except UserProfile.DoesNotExist:
raise ValidationError(_('Invalid user ID: {}').format(user_id))
if not allow_deactivated:
if not user_profile.is_active:
raise ValidationError(_('User with ID {} is deactivated').format(user_id))
        if user_profile.is_bot:
raise ValidationError(_('User with ID {} is a bot').format(user_id))
return user_ids
class CustomProfileField(models.Model):
"""Defines a form field for the per-realm custom profile fields feature.
See CustomProfileFieldValue for an individual user's values for one of
these fields.
"""
HINT_MAX_LENGTH = 80
NAME_MAX_LENGTH = 40
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
name: str = models.CharField(max_length=NAME_MAX_LENGTH)
hint: Optional[str] = models.CharField(max_length=HINT_MAX_LENGTH, default='', null=True)
order: int = models.IntegerField(default=0)
SHORT_TEXT = 1
LONG_TEXT = 2
CHOICE = 3
DATE = 4
URL = 5
USER = 6
EXTERNAL_ACCOUNT = 7
    # These are the fields whose validators require more than the
    # var_name and value arguments, e.g. CHOICE requires field_data and
    # USER requires a realm.
CHOICE_FIELD_TYPE_DATA: List[ExtendedFieldElement] = [
(CHOICE, ugettext_lazy('List of options'), validate_choice_field, str, "CHOICE"),
]
USER_FIELD_TYPE_DATA: List[UserFieldElement] = [
(USER, ugettext_lazy('Person picker'), check_valid_user_ids, ast.literal_eval, "USER"),
]
CHOICE_FIELD_VALIDATORS: Dict[int, ExtendedValidator] = {
item[0]: item[2] for item in CHOICE_FIELD_TYPE_DATA
}
USER_FIELD_VALIDATORS: Dict[int, RealmUserValidator] = {
item[0]: item[2] for item in USER_FIELD_TYPE_DATA
}
FIELD_TYPE_DATA: List[FieldElement] = [
# Type, Display Name, Validator, Converter, Keyword
(SHORT_TEXT, ugettext_lazy('Short text'), check_short_string, str, "SHORT_TEXT"),
(LONG_TEXT, ugettext_lazy('Long text'), check_long_string, str, "LONG_TEXT"),
(DATE, ugettext_lazy('Date picker'), check_date, str, "DATE"),
(URL, ugettext_lazy('Link'), check_url, str, "URL"),
(EXTERNAL_ACCOUNT, ugettext_lazy('External account'), check_short_string, str, "EXTERNAL_ACCOUNT"),
]
ALL_FIELD_TYPES = [*FIELD_TYPE_DATA, *CHOICE_FIELD_TYPE_DATA, *USER_FIELD_TYPE_DATA]
FIELD_VALIDATORS: Dict[int, Validator[Union[int, str, List[int]]]] = {item[0]: item[2] for item in FIELD_TYPE_DATA}
FIELD_CONVERTERS: Dict[int, Callable[[Any], Any]] = {item[0]: item[3] for item in ALL_FIELD_TYPES}
FIELD_TYPE_CHOICES: List[Tuple[int, Promise]] = [(item[0], item[1]) for item in ALL_FIELD_TYPES]
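    # Illustrative example (added, derived from the tables above): validating
    # a value dispatches on field_type, e.g. for a URL field one would call
    # FIELD_VALIDATORS[CustomProfileField.URL]('value', value), which is
    # check_url per FIELD_TYPE_DATA.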
field_type: int = models.PositiveSmallIntegerField(
choices=FIELD_TYPE_CHOICES, default=SHORT_TEXT,
)
# A JSON blob of any additional data needed to define the field beyond
# type/name/hint.
#
# The format depends on the type. Field types SHORT_TEXT, LONG_TEXT,
# DATE, URL, and USER leave this null. Fields of type CHOICE store the
# choices' descriptions.
#
# Note: There is no performance overhead of using TextField in PostgreSQL.
# See https://www.postgresql.org/docs/9.0/static/datatype-character.html
field_data: Optional[str] = models.TextField(default='', null=True)
class Meta:
unique_together = ('realm', 'name')
def as_dict(self) -> ProfileDataElementBase:
return {
'id': self.id,
'name': self.name,
'type': self.field_type,
'hint': self.hint,
'field_data': self.field_data,
'order': self.order,
}
def is_renderable(self) -> bool:
if self.field_type in [CustomProfileField.SHORT_TEXT, CustomProfileField.LONG_TEXT]:
return True
return False
def __str__(self) -> str:
return f"<CustomProfileField: {self.realm} {self.name} {self.field_type} {self.order}>"
def custom_profile_fields_for_realm(realm_id: int) -> List[CustomProfileField]:
return CustomProfileField.objects.filter(realm=realm_id).order_by('order')
class CustomProfileFieldValue(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
field: CustomProfileField = models.ForeignKey(CustomProfileField, on_delete=CASCADE)
value: str = models.TextField()
rendered_value: Optional[str] = models.TextField(null=True, default=None)
class Meta:
unique_together = ('user_profile', 'field')
def __str__(self) -> str:
return f"<CustomProfileFieldValue: {self.user_profile} {self.field} {self.value}>"
# Interfaces for services
# They provide additional functionality like parsing a message to obtain the
# query URL, the data to be sent to that URL, and parsing the response.
GENERIC_INTERFACE = 'GenericService'
SLACK_INTERFACE = 'SlackOutgoingWebhookService'
# A Service corresponds to either an outgoing webhook bot or an embedded bot.
# The type of Service is determined by the bot_type field of the referenced
# UserProfile.
#
# If the Service is an outgoing webhook bot:
# - name is any human-readable identifier for the Service
# - base_url is the address of the third-party site
# - token is used for authentication with the third-party site
#
# If the Service is an embedded bot:
# - name is the canonical name for the type of bot (e.g. 'xkcd' for an instance
# of the xkcd bot); multiple embedded bots can have the same name, but all
# embedded bots with the same name will run the same code
# - base_url and token are currently unused
class Service(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
name: str = models.CharField(max_length=UserProfile.MAX_NAME_LENGTH)
# Bot user corresponding to the Service. The bot_type of this user
    # determines the type of service. If non-bot services are added later,
# user_profile can also represent the owner of the Service.
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
base_url: str = models.TextField()
token: str = models.TextField()
# Interface / API version of the service.
interface: int = models.PositiveSmallIntegerField(default=1)
# Valid interfaces are {generic, zulip_bot_service, slack}
GENERIC = 1
SLACK = 2
ALLOWED_INTERFACE_TYPES = [
GENERIC,
SLACK,
]
    # N.B. If we used Django's choices=... we would get this for free (kinda)
_interfaces: Dict[int, str] = {
GENERIC: GENERIC_INTERFACE,
SLACK: SLACK_INTERFACE,
}
def interface_name(self) -> str:
# Raises KeyError if invalid
return self._interfaces[self.interface]
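    # Illustrative example (added): a Service row with interface=Service.SLACK
    # reports interface_name() == 'SlackOutgoingWebhookService', per the
    # _interfaces mapping above.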
def get_bot_services(user_profile_id: int) -> List[Service]:
return list(Service.objects.filter(user_profile__id=user_profile_id))
def get_service_profile(user_profile_id: int, service_name: str) -> Service:
return Service.objects.get(user_profile__id=user_profile_id, name=service_name)
class BotStorageData(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
bot_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
key: str = models.TextField(db_index=True)
value: str = models.TextField()
class Meta:
unique_together = ("bot_profile", "key")
class BotConfigData(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
bot_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
key: str = models.TextField(db_index=True)
value: str = models.TextField()
class Meta:
unique_together = ("bot_profile", "key")
class InvalidFakeEmailDomain(Exception):
pass
def get_fake_email_domain() -> str:
try:
# Check that the fake email domain can be used to form valid email addresses.
validate_email("bot@" + settings.FAKE_EMAIL_DOMAIN)
except ValidationError:
raise InvalidFakeEmailDomain(settings.FAKE_EMAIL_DOMAIN + ' is not a valid domain. '
'Consider setting the FAKE_EMAIL_DOMAIN setting.')
return settings.FAKE_EMAIL_DOMAIN
class AlertWord(models.Model):
# Realm isn't necessary, but it's a nice denormalization. Users
# never move to another realm, so it's static, and having Realm
# here optimizes the main query on this table, which is fetching
# all the alert words in a realm.
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name='ID')
realm: Realm = models.ForeignKey(Realm, db_index=True, on_delete=CASCADE)
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
# Case-insensitive name for the alert word.
word: str = models.TextField()
class Meta:
unique_together = ("user_profile", "word")
def flush_realm_alert_words(realm: Realm) -> None:
cache_delete(realm_alert_words_cache_key(realm))
cache_delete(realm_alert_words_automaton_cache_key(realm))
def flush_alert_word(sender: Any, **kwargs: Any) -> None:
realm = kwargs['instance'].realm
flush_realm_alert_words(realm)
post_save.connect(flush_alert_word, sender=AlertWord)
post_delete.connect(flush_alert_word, sender=AlertWord)
| kou/zulip | zerver/models.py | Python | apache-2.0 | 130,540 | ["VisIt"] | 8e3b60fedffde2f3b4c22dfe4b6af9870ce155d5686f046aa4017906fe4770b3 |
"""
====================================================================
Probabilistic predictions with Gaussian process classification (GPC)
====================================================================
This example illustrates the predicted probability of GPC for an RBF kernel
with different choices of the hyperparameters. The first figure shows the
predicted probability of GPC with arbitrarily chosen hyperparameters and with
the hyperparameters corresponding to the maximum log-marginal-likelihood (LML).
While the hyperparameters chosen by optimizing LML have a considerably larger
LML, they perform slightly worse according to the log-loss on test data. The
figure shows that this is because they exhibit a steep change of the class
probabilities at the class boundaries (which is good) but have predicted
probabilities close to 0.5 far away from the class boundaries (which is bad).
This undesirable effect is caused by the Laplace approximation used
internally by GPC.
The second figure shows the log-marginal-likelihood for different choices of
the kernel's hyperparameters, highlighting the two choices of the
hyperparameters used in the first figure by black dots.
"""
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics import accuracy_score, log_loss
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# Generate data
train_size = 50
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 100)[:, np.newaxis]
y = np.array(X[:, 0] > 2.5, dtype=int)
# Specify Gaussian Processes with fixed and optimized hyperparameters
gp_fix = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0), optimizer=None)
gp_fix.fit(X[:train_size], y[:train_size])
gp_opt = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0))
gp_opt.fit(X[:train_size], y[:train_size])
print(
"Log Marginal Likelihood (initial): %.3f"
% gp_fix.log_marginal_likelihood(gp_fix.kernel_.theta)
)
print(
"Log Marginal Likelihood (optimized): %.3f"
% gp_opt.log_marginal_likelihood(gp_opt.kernel_.theta)
)
print(
"Accuracy: %.3f (initial) %.3f (optimized)"
% (
accuracy_score(y[:train_size], gp_fix.predict(X[:train_size])),
accuracy_score(y[:train_size], gp_opt.predict(X[:train_size])),
)
)
print(
"Log-loss: %.3f (initial) %.3f (optimized)"
% (
log_loss(y[:train_size], gp_fix.predict_proba(X[:train_size])[:, 1]),
log_loss(y[:train_size], gp_opt.predict_proba(X[:train_size])[:, 1]),
)
)
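# Editor's note (added): the accuracy and log-loss above are computed on the
# training slice X[:train_size]; evaluating on the held-out X[train_size:]
# instead would give the test-data comparison described in the docstring.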
# Plot posteriors
plt.figure()
plt.scatter(
X[:train_size, 0], y[:train_size], c="k", label="Train data", edgecolors=(0, 0, 0)
)
plt.scatter(
X[train_size:, 0], y[train_size:], c="g", label="Test data", edgecolors=(0, 0, 0)
)
X_ = np.linspace(0, 5, 100)
plt.plot(
X_,
gp_fix.predict_proba(X_[:, np.newaxis])[:, 1],
"r",
label="Initial kernel: %s" % gp_fix.kernel_,
)
plt.plot(
X_,
gp_opt.predict_proba(X_[:, np.newaxis])[:, 1],
"b",
label="Optimized kernel: %s" % gp_opt.kernel_,
)
plt.xlabel("Feature")
plt.ylabel("Class 1 probability")
plt.xlim(0, 5)
plt.ylim(-0.25, 1.5)
plt.legend(loc="best")
# Plot LML landscape
plt.figure()
theta0 = np.logspace(0, 8, 30)
theta1 = np.logspace(-1, 1, 29)
Theta0, Theta1 = np.meshgrid(theta0, theta1)
LML = [
[
gp_opt.log_marginal_likelihood(np.log([Theta0[i, j], Theta1[i, j]]))
for i in range(Theta0.shape[0])
]
for j in range(Theta0.shape[1])
]
LML = np.array(LML).T
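# Editor's note (added): log_marginal_likelihood expects hyperparameters in
# log-space, hence the np.log(...) around each (Theta0, Theta1) pair; the
# transpose then restores the meshgrid shape so LML aligns with Theta0 and
# Theta1 in the pcolor call below.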
plt.plot(
np.exp(gp_fix.kernel_.theta)[0], np.exp(gp_fix.kernel_.theta)[1], "ko", zorder=10
)
plt.plot(
np.exp(gp_opt.kernel_.theta)[0], np.exp(gp_opt.kernel_.theta)[1], "ko", zorder=10
)
plt.pcolor(Theta0, Theta1, LML)
plt.xscale("log")
plt.yscale("log")
plt.colorbar()
plt.xlabel("Magnitude")
plt.ylabel("Length-scale")
plt.title("Log-marginal-likelihood")
plt.show()
| manhhomienbienthuy/scikit-learn | examples/gaussian_process/plot_gpc.py | Python | bsd-3-clause | 3,999 | ["Gaussian"] | c8e9ead4eec364b492fd944d43f41939733410580e55530ac6f9031c0f8b2f07 |
# -*- coding: utf-8 -*-
# HiPart is a program to analyze the electronic structure of molecules with
# fuzzy-atom partitioning methods.
# Copyright (C) 2007 - 2012 Toon Verstraelen <Toon.Verstraelen@UGent.be>
#
# This file is part of HiPart.
#
# HiPart is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HiPart is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
# TODO: Support for Gaussian/GAMESS wfn files + make clever algo that detects
# contractions
# TODO: Extend hi-atomdb.py to work with GAMESS
# TODO: Implement the potential generated by an atomic density, and evaluate it
# on grids of other atoms. This can be used to compute QM-level electrostatic
# interaction energies. (See Becke's paper http://dx.doi.org/10.1063/1.455005)
# TODO: Compute condensed linear response properties
# TODO: Support for CP2K and CPMD wavefunctions
# TODO: Visualize Atomic deviations from sphericity (1D plots)
# TODO: Visualization of atomic (pair) data with graphs
# TODO: Cube files with atomic weights and densities
# TODO: use smaller lebedev grids close to the cusps
from hipart.atoms import AtomTable
from hipart.ext import grid_distances
from hipart.fit import ESPCostFunction
from hipart.gint import dmat_to_full
from hipart.grids import Grid, AtomicGrid, RLogIntGrid, ALebedevIntGrid
from hipart.io import dump_atom_scalars, dump_atom_vectors, dump_atom_matrix, \
dump_atom_fields, dump_overlap_matrices
from hipart.lebedev_laikov import get_grid as get_lebedev_grid
from hipart.log import log
from hipart.spline import CubicSpline
from molmod import Rotation, angstrom
from molmod.periodic import periodic
import os, numpy
__all__ = ["ParseError", "scheme_classes"]
noble_numbers = numpy.array([0,2,10,18,36,54,86,118])
core_sizes = dict((number, noble_numbers[noble_numbers<=number].max()) for number in periodic.iter_numbers())
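# Illustrative example (added): core_sizes maps an element to the electron
# count of the largest noble-gas core that fits, e.g. core_sizes[6] == 2 for
# carbon and core_sizes[26] == 18 for iron.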
class ParseError(Exception):
pass
class OnlyOnce(object):
def __init__(self, description):
self.description = description
def __call__(self, fn):
def wrapper(instance):
if fn.func_name in instance._done:
return
log.begin(self.description)
fn(instance)
log.end()
instance._done.add(fn.func_name)
wrapper.__doc__ = fn.__doc__
return wrapper
class LazyDistances(object):
def __init__(self, centers, grid, save_mem):
self.centers = centers
self.grid = grid
self.save_mem = save_mem
if save_mem:
self._result = numpy.zeros(grid.size, float)
else:
self._cache = {}
def __len__(self):
return len(self.centers)
def __getitem__(self, index):
if self.save_mem:
# in case we want to save memory, the grid distances are always
# computed from scratch. For reasons of efficiency, the same result
            # array is used to avoid repetitive memory allocation.
grid_distances(self.centers[index], self.grid.points, self._result)
return self._result
else:
result = self._cache.get(index)
if result is None:
result = numpy.zeros(self.grid.size, float)
grid_distances(self.centers[index], self.grid.points, result)
self._cache[index] = result
return result
class BaseScheme(object):
prefix = None
usage = None
@classmethod
def new_from_args(cls, context, args):
raise NotImplementedError
def __init__(self, context, rgrid, extra_tag_attributes):
# create angular grid object
agrid = ALebedevIntGrid(context.options.lebedev, context.options.do_random)
# check arguments
extra_tag_attributes["rgrid"] = rgrid.get_description()
extra_tag_attributes["agrid"] = agrid.get_description()
context.check_tag(extra_tag_attributes)
# assign attributes
self.context = context
self.rgrid = rgrid
self.agrid = agrid
self._done = set([])
# clone attributes from context
self.work = context.work
self.output = context.output
self.wavefn = context.wavefn
self.molecule = context.wavefn.molecule
def _spherint(self, integrand):
radfun = self.agrid.integrate(integrand)
rs = self.rgrid.rs[:len(integrand)]
return self.rgrid.integrate(radfun*rs*rs)
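    # Editor's note (added): _spherint performs the spherical volume integral
    # in two steps: the angular (Lebedev) quadrature collapses the integrand
    # to a radial function f(r), and the radial grid then integrates
    # f(r)*r**2, supplying the r**2 Jacobian of spherical coordinates.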
@OnlyOnce("Atomic grids")
def do_atgrids(self):
self.atgrids = []
pb = log.pb("Computing/Loading atomic grids and distances", self.molecule.size)
for i in xrange(self.molecule.size):
pb()
name = "atom%05i" % i
atgrid = AtomicGrid.from_prefix(name, self.work)
if atgrid is None:
center = self.molecule.coordinates[i]
atgrid = AtomicGrid.from_parameters(name, self.work, center, self.rgrid, self.agrid)
self.atgrids.append(atgrid)
# Compute and store all the distances from these grid points to the
# nuclei.
atgrid.distances = LazyDistances(self.molecule.coordinates, atgrid, self.context.options.save_mem)
pb()
@OnlyOnce("Molecular density on atomic grids")
def do_atgrids_moldens(self):
self.do_atgrids()
pb = log.pb("Computing/Loading densities", self.molecule.size)
for i in xrange(self.molecule.size):
pb()
self.wavefn.compute_density(self.atgrids[i])
pb()
@OnlyOnce("Molecular spin density on atomic grids")
def do_atgrids_molspindens(self):
self.do_atgrids()
pb = log.pb("Computing/Loading spin densities", self.molecule.size)
for i in xrange(self.molecule.size):
pb()
self.wavefn.compute_spin_density(self.atgrids[i])
pb()
@OnlyOnce("Estimating noble gas core radii")
def do_noble_radii(self):
self.noble_radii = self.work.load("noble_radii")
if self.noble_radii is None:
self.do_atgrids_moldens()
self.noble_radii = numpy.zeros(self.molecule.size, float)
for i, number_i in enumerate(self.molecule.numbers):
if number_i < 3:
self.noble_radii[i] = 0.2
else:
densities = self.atgrids[i].moldens
radfun = self.agrid.integrate(densities)
rs = self.rgrid.rs[:len(radfun)]
charge_int = self.rgrid.integrate_cumul(radfun*rs*rs)
j = charge_int.searchsorted([core_sizes[number_i]])[0]
self.noble_radii[i] = self.rgrid.rs[j]
self.work.dump("noble_radii", self.noble_radii)
@OnlyOnce("Computing the ESP cost function")
def do_esp_costfunction(self):
# TODO: the ESP cost function should be upgraded to a more reliable
# implementation. We should consider the cost function as an integral
# over the volume where the density is not too high and the distance
# from the molecule is not too far. This can be achieved by a
# combination of Becke's integration scheme
# (http://dx.doi.org/10.1063/1.454033) and Hu's ESP method
# (http://dx.doi.org/10.1021/ct600295n). Then there is no need to
# construct a molecular grid. The atomic grids are sufficient.
# TODO: output ESP charges in the same way as the stockholder charges.
self.do_molgrid_moldens()
self.do_molgrid_molpot()
self.mol_esp_cost = ESPCostFunction(
self.molecule.coordinates, self.molgrid.points, self.molgrid.weights,
self.molgrid.moldens, self.molgrid.molpot, self.wavefn.charge,
)
self.output.dump_esp_cost("mol_esp_cost.txt", self.mol_esp_cost)
@OnlyOnce("Molecular grid")
def do_molgrid(self):
self.molgrid = Grid.from_prefix("molecule", self.work)
if self.molgrid is not None:
self.molgrid.weights = self.molgrid.load("weights")
else:
# we have to generate a new grid. The grid is constructed taking
# into account the following considerations:
# 1) Grid points within the cusp region are discarded
# 2) The rest of the molecular and surrounding volume is sampled
# with spherical grids centered on the atoms. Around each atom,
# 'scale_steps' of shells are placed with lebedev grid points
# (num_lebedev). The lebedev weights are used in the fit to
# avoid preferential directions within one shell.
# 3) The radii of the shells start from scale_min*(cusp_radius+0.2)
# and go up to scale_max*(cusp_radius+0.2).
# 4) Each shell will be randomly rotated around the atom to avoid
# global preferential directions in the grid.
# 5) The default parameters for the grid should be sufficient for
# sane ESP fitting. The ESP cost function should discard points
# with a density larger than a threshold, i.e. 1e-5 a.u. A
# gradual transition between included and discarded points around
# this threshold will improve the quality of the fit.
lebedev_xyz, lebedev_weights = get_lebedev_grid(50)
self.do_noble_radii()
scale_min = 1.5
scale_max = 30.0
scale_steps = 30
scale_factor = (scale_max/scale_min)**(1.0/(scale_steps-1))
scales = scale_min*scale_factor**numpy.arange(scale_steps)
points = []
weights = []
pb = log.pb("Constructing molecular grid", scale_steps)
for scale in scales:
pb()
radii = scale*self.noble_radii
for i in xrange(self.molecule.size):
rot = Rotation.random()
for j in xrange(len(lebedev_xyz)):
my_point = radii[i]*numpy.dot(rot.r, lebedev_xyz[j]) + self.molecule.coordinates[i]
distances = numpy.sqrt(((self.molecule.coordinates - my_point)**2).sum(axis=1))
if (distances < scales[0]*self.noble_radii).any():
continue
points.append(my_point)
weights.append(lebedev_weights[j])
pb()
points = numpy.array(points)
weights = numpy.array(weights)
self.molgrid = Grid("molecule", self.work, points)
self.molgrid.weights = weights
self.molgrid.dump("weights", weights)
@OnlyOnce("Molecular density on the molecular grid")
def do_molgrid_moldens(self):
self.do_molgrid()
self.wavefn.compute_density(self.molgrid)
@OnlyOnce("Molecular potential on the molecular grid")
def do_molgrid_molpot(self):
self.do_molgrid()
log("This may take a minute. Hang on.")
self.wavefn.compute_potential(self.molgrid)
def _prepare_atweights(self):
pass
@OnlyOnce("Defining atomic weight functions (own atomic grid)")
def do_atgrids_atweights(self):
self.do_atgrids()
log("Trying to load weight functions")
success = self._load_atgrid_atweights()
if not success:
log("Could not load all weight functions from workdir. Computing them.")
self._prepare_atweights()
self._compute_atgrid_atweights()
log("Writing results to workdir")
self._dump_atgrid_atweights()
def _load_atgrid_atweights(self):
ws = []
for i in xrange(self.molecule.size):
w = self.atgrids[i].load("%s_atweights" % self.prefix)
if w is None:
return False
else:
ws.append(w)
for i in xrange(self.molecule.size):
self.atgrids[i].atweights = ws[i]
return True
def _compute_atgrid_atweights(self):
raise NotImplementedError
def _dump_atgrid_atweights(self):
for i in xrange(self.molecule.size):
self.atgrids[i].dump("%s_atweights" % self.prefix, self.atgrids[i].atweights, ignore=True)
@OnlyOnce("Atomic charges")
def do_charges(self):
charges_name = "%s_charges" % self.prefix
populations_name = "%s_populations" % self.prefix
self.charges = self.work.load(charges_name)
self.populations = self.work.load(populations_name)
if self.charges is None or self.populations is None:
self.do_atgrids()
self.do_atgrids_moldens()
self.do_atgrids_atweights()
pb = log.pb("Computing charges", self.molecule.size)
self.populations = numpy.zeros(self.molecule.size, float)
self.charges = numpy.zeros(self.molecule.size, float)
for i in xrange(self.molecule.size):
pb()
w = self.atgrids[i].atweights
d = self.atgrids[i].moldens
center = self.molecule.coordinates[i]
self.populations[i] = self._spherint(d*w)
self.charges[i] = self.wavefn.nuclear_charges[i] - self.populations[i]
pb()
if self.context.options.fix_total_charge:
self.charges -= (self.charges.sum() - self.wavefn.charge)/self.molecule.size
self.work.dump(charges_name, self.charges)
self.work.dump(populations_name, self.populations)
self.output.dump_atom_scalars("%s_charges.txt" % self.prefix, self.charges, "Charge")
@OnlyOnce("Atomic spin charges")
def do_spin_charges(self):
spin_charges_name = "%s_spin_charges" % self.prefix
self.spin_charges = self.work.load(spin_charges_name)
if self.spin_charges is None:
self.do_atgrids()
self.do_atgrids_molspindens()
self.do_atgrids_atweights()
pb = log.pb("Computing spin charges", self.molecule.size)
self.spin_charges = numpy.zeros(self.molecule.size, float)
for i in xrange(self.molecule.size):
pb()
w = self.atgrids[i].atweights
d = self.atgrids[i].molspindens
center = self.molecule.coordinates[i]
self.spin_charges[i] = self._spherint(d*w)
pb()
self.work.dump(spin_charges_name, self.spin_charges)
self.output.dump_atom_scalars("%s_spin_charges.txt" % self.prefix, self.spin_charges, "Spin charge")
@OnlyOnce("Atomic dipoles")
def do_dipoles(self):
dipoles_name = "%s_dipoles" % self.prefix
self.dipoles = self.work.load(dipoles_name, (-1,3))
if self.dipoles is None:
self.do_atgrids()
self.do_atgrids_moldens()
self.do_atgrids_atweights()
pb = log.pb("Computing dipoles", self.molecule.size)
self.dipoles = numpy.zeros((self.molecule.size,3), float)
for i in xrange(self.molecule.size):
pb()
atgrid = self.atgrids[i]
w = atgrid.atweights
d = atgrid.moldens
center = self.molecule.coordinates[i]
for j in 0,1,2:
integrand = -(atgrid.points[:,j] - center[j])*d*w
self.dipoles[i,j] = self._spherint(integrand)
pb()
self.work.dump(dipoles_name, self.dipoles)
self.output.dump_atom_vectors("%s_dipoles.txt" % self.prefix, self.dipoles, "Dipoles")
@OnlyOnce("Atomic multipoles (up to hexadecapols)")
def do_multipoles(self):
regular_solid_harmonics = [
lambda x,y,z: 1.0, # (0,0)
lambda x,y,z: z, # (1,0)
lambda x,y,z: x, # (1,1+)
lambda x,y,z: y, # (1,1-)
lambda x,y,z: 1.0*z**2 - 0.5*x**2 - 0.5*y**2, # (2,0)
lambda x,y,z: 1.7320508075688772935*x*z, # (2,1+)
lambda x,y,z: 1.7320508075688772935*y*z, # (2,1-)
lambda x,y,z: 0.86602540378443864676*x**2 - 0.86602540378443864676*y**2, # (2,2+)
lambda x,y,z: 1.7320508075688772935*x*y, # (2,2-)
lambda x,y,z: -1.5*z*x**2 - 1.5*z*y**2 + z**3, # (3,0)
lambda x,y,z: 2.4494897427831780982*x*z**2 - 0.61237243569579452455*x*y**2 - 0.61237243569579452455*x**3, # (3,1+)
lambda x,y,z: 2.4494897427831780982*y*z**2 - 0.61237243569579452455*y*x**2 - 0.61237243569579452455*y**3, # (3,1-)
lambda x,y,z: 1.9364916731037084426*z*x**2 - 1.9364916731037084426*z*y**2, # (3,2+)
lambda x,y,z: 3.8729833462074168852*x*y*z, # (3,2-)
lambda x,y,z: -2.371708245126284499*x*y**2 + 0.790569415042094833*x**3, # (3,3+)
lambda x,y,z: 2.371708245126284499*y*x**2 - 0.790569415042094833*y**3, # (3,3-)
lambda x,y,z: 0.75*x**2*y**2 - 3.0*x**2*z**2 - 3.0*y**2*z**2 + z**4 + 0.375*x**4 + 0.375*y**4, # (4,0)
lambda x,y,z: -2.371708245126284499*x*z*y**2 + 3.162277660168379332*x*z**3 - 2.371708245126284499*z*x**3, # (4,1+)
lambda x,y,z: -2.371708245126284499*y*z*x**2 + 3.162277660168379332*y*z**3 - 2.371708245126284499*z*y**3, # (4,1-)
lambda x,y,z: 3.3541019662496845446*x**2*z**2 - 3.3541019662496845446*y**2*z**2 + 0.5590169943749474241*y**4 - 0.5590169943749474241*x**4, # (4,2+)
lambda x,y,z: 6.7082039324993690892*x*y*z**2 - 1.1180339887498948482*x*y**3 - 1.1180339887498948482*y*x**3, # (4,2-)
lambda x,y,z: -6.2749501990055666098*x*z*y**2 + 2.0916500663351888699*z*x**3, # (4,3+)
lambda x,y,z: 6.2749501990055666098*y*z*x**2 - 2.0916500663351888699*z*y**3, # (4,3-)
lambda x,y,z: -4.4370598373247120319*x**2*y**2 + 0.73950997288745200532*x**4 + 0.73950997288745200532*y**4, # (4,4+)
lambda x,y,z: 2.9580398915498080213*y*x**3 - 2.9580398915498080213*x*y**3, # (4,4-)
]
labels = [
'(0,0)', '(1,0)', '(1,1+)', '(1,1-)', '(2,0)', '(2,1+)', '(2,1-)',
'(2,2+)', '(2,2-)', '(3,0)', '(3,1+)', '(3,1-)', '(3,2+)', '(3,2-)',
'(3,3+)', '(3,3-)', '(4,0)', '(4,1+)', '(4,1-)', '(4,2+)', '(4,2-)',
'(4,3+)', '(4,3-)', '(4,4+)', '(4,4-)'
]
multipoles_name = "%s_multipoles.bin" % self.prefix
num_polys = len(regular_solid_harmonics)
shape = (self.molecule.size,num_polys)
self.multipoles = self.work.load(multipoles_name, shape)
if self.multipoles is None:
self.do_atgrids()
self.do_atgrids_moldens()
self.do_atgrids_atweights()
pb = log.pb("Computing multipoles", self.molecule.size)
self.multipoles = numpy.zeros(shape, float)
for i in xrange(self.molecule.size):
pb()
atgrid = self.atgrids[i]
w = atgrid.atweights
d = atgrid.moldens
center = self.molecule.coordinates[i]
cx = atgrid.points[:,0] - center[0]
cy = atgrid.points[:,1] - center[1]
cz = atgrid.points[:,2] - center[2]
for j in xrange(num_polys):
poly = regular_solid_harmonics[j]
self.multipoles[i,j] = self._spherint(-poly(cx,cy,cz)*d*w)
self.multipoles[i,0] += self.wavefn.nuclear_charges[i]
pb()
self.work.dump(multipoles_name, self.multipoles)
self.output.dump_atom_fields("%s_multipoles.txt" % self.prefix, self.multipoles, labels, "Multipoles")
@OnlyOnce("Testing charges and dipoles on ESP grid.")
def do_esp_test(self):
self.do_charges()
self.do_dipoles()
self.do_esp_costfunction()
dipole_q = numpy.dot(self.charges, self.molecule.coordinates)
dipole_p = self.dipoles.sum(axis=0)
dipole_qp = dipole_q + dipole_p
dipole_qm = self.wavefn.dipole
self.output.dump_esp_test(
"%s_esp_test.txt" % self.prefix, dipole_q, dipole_p, dipole_qp,
dipole_qm, self.mol_esp_cost, self.charges, self.dipoles
)
@OnlyOnce("Evaluating orbitals on atomic grids")
def do_atgrids_orbitals(self):
self.do_atgrids()
self.wavefn.init_naturals(self.work)
pb = log.pb("Computing/Loading orbitals", self.molecule.size)
for i in xrange(self.molecule.size):
pb()
self.wavefn.compute_orbitals(self.atgrids[i])
pb()
@OnlyOnce("Atomic overlap matrices (orbitals)")
def do_atgrids_overlap_matrix_orb(self):
# Note that the overlap matrices are computed in the basis of the
# orbitals. Each kind of overlap matrix is thus computed in the basis
# of its corresponding kind of orbitals.
self.do_atgrids()
def do_one_kind(kind):
# first check for restricted
orbitals = getattr(self.wavefn, "%s_orbitals" % kind)
if kind!="alpha" and self.wavefn.alpha_orbitals is orbitals:
# simply make references to alpha data and return
log("Cloning alpha results (%s)" % kind)
for i in xrange(self.molecule.size):
setattr(self.atgrids[i], "%s_overlap_matrix_orb" % kind, self.atgrids[i].alpha_overlap_matrix_orb)
return
# then try to load the matrices
some_failed = False
num_orbitals = self.wavefn.num_orbitals
for i in xrange(self.molecule.size):
matrix = self.atgrids[i].load("%s_%s_overlap_matrix_orb" % (self.prefix, kind))
if matrix is None:
some_failed = True
else:
matrix = matrix.reshape((num_orbitals, num_orbitals))
setattr(self.atgrids[i], "%s_overlap_matrix_orb" % kind, matrix)
if some_failed:
self.do_atgrids_orbitals()
self.do_atgrids_atweights()
pb = log.pb("Computing atomic overlap matrices (%s)" % kind, self.molecule.size)
for i in xrange(self.molecule.size):
pb()
if getattr(self.atgrids[i], "%s_overlap_matrix_orb" % kind) is None:
orbitals = getattr(self.atgrids[i], "%s_orbitals" % kind)
w = self.atgrids[i].atweights
matrix = numpy.zeros((num_orbitals,num_orbitals), float)
for j1 in xrange(num_orbitals):
for j2 in xrange(j1+1):
integrand = orbitals[j1]*orbitals[j2]*w
value = self._spherint(integrand)
matrix[j1,j2] = value
matrix[j2,j1] = value
setattr(self.atgrids[i], "%s_overlap_matrix_orb" % kind, matrix)
self.atgrids[i].dump("%s_%s_overlap_matrix_orb" % (self.prefix, kind), matrix)
pb()
filename = "%s_%s_overlap_matrices_orb.txt" % (self.prefix, kind)
overlap_matrices = [
getattr(grid, "%s_overlap_matrix_orb" % kind)
for grid in self.atgrids
]
self.output.dump_overlap_matrices(filename, overlap_matrices)
do_one_kind("alpha")
do_one_kind("beta")
do_one_kind("natural")
@OnlyOnce("Atomic overlap matrices (contracted Gaussians)")
def do_atgrids_overlap_matrix(self):
self.do_atgrids()
self.do_atgrids_atweights()
num_orbitals = self.wavefn.num_orbitals
pb = log.pb("Computing matrices", self.molecule.size)
for i in xrange(self.molecule.size):
pb()
atgrid = self.atgrids[i]
suffix = "%s_overlap_matrix" % self.prefix
overlap = atgrid.load(suffix)
if overlap is None:
rw = self.rgrid.get_weights().copy()
rw *= 4*numpy.pi
rw *= self.rgrid.rs
rw *= self.rgrid.rs
weights = numpy.outer(rw, self.agrid.lebedev_weights).ravel()
weights *= atgrid.atweights
overlap = self.wavefn.compute_atomic_overlap(atgrid, weights)
atgrid.dump(suffix, overlap)
else:
overlap = overlap.reshape((num_orbitals, num_orbitals))
atgrid.overlap_matrix = overlap
pb()
filename = "%s_overlap_matrices.txt" % self.prefix
overlap_matrices = [atgrid.overlap_matrix for atgrid in self.atgrids]
self.output.dump_overlap_matrices(filename, overlap_matrices)
@OnlyOnce("Bond orders and valences")
def do_bond_orders(self):
# first try to load the results from the work dir
bond_orders_name = "%s_bond_orders" % self.prefix
valences_name = "%s_valences" % self.prefix
self.bond_orders = self.work.load(bond_orders_name, (self.molecule.size, self.molecule.size))
self.valences = self.work.load(valences_name)
if self.bond_orders is None or self.valences is None:
self.do_charges()
self.do_atgrids_overlap_matrix()
self.bond_orders = numpy.zeros((self.molecule.size, self.molecule.size))
self.valences = numpy.zeros(self.molecule.size)
num_dof = self.wavefn.num_orbitals
full = numpy.zeros((num_dof, num_dof), float)
dmat_to_full(self.wavefn.density_matrix, full)
if self.wavefn.spin_density_matrix is None:
full_alpha = 0.5*full
full_beta = full_alpha
else:
full_alpha = numpy.zeros((num_dof, num_dof), float)
full_beta = numpy.zeros((num_dof, num_dof), float)
dmat_to_full(
0.5*(self.wavefn.density_matrix +
self.wavefn.spin_density_matrix), full_alpha
)
dmat_to_full(
0.5*(self.wavefn.density_matrix -
self.wavefn.spin_density_matrix), full_beta
)
pb = log.pb("Computing bond orders", (self.molecule.size*(self.molecule.size+1))/2)
for i in xrange(self.molecule.size):
for j in xrange(i+1):
pb()
if i==j:
# compute valence
tmp = numpy.dot(full, self.atgrids[i].overlap_matrix)
self.valences[i] = 2*self.populations[i] - (tmp*tmp.transpose()).sum()
else:
# compute bond order
bo = (
numpy.dot(full_alpha, self.atgrids[i].overlap_matrix)*
numpy.dot(full_alpha, self.atgrids[j].overlap_matrix).transpose()
).sum()
if full_alpha is full_beta:
bo *= 2
else:
bo += (
numpy.dot(full_beta, self.atgrids[i].overlap_matrix)*
numpy.dot(full_beta, self.atgrids[j].overlap_matrix).transpose()
).sum()
bo *= 2
self.bond_orders[i,j] = bo
self.bond_orders[j,i] = bo
pb()
self.work.dump(bond_orders_name, self.bond_orders)
self.work.dump(valences_name, self.valences)
self.free_valences = self.valences - self.bond_orders.sum(axis=1)
self.output.dump_atom_matrix("%s_bond_orders.txt" % self.prefix, self.bond_orders, "Bond order")
self.output.dump_atom_scalars("%s_valences.txt" % self.prefix, self.valences, "Valences")
self.output.dump_atom_scalars("%s_free_valences.txt" % self.prefix, self.free_valences, "Free valences")
@OnlyOnce("Atomic weights on other atoms' grids.")
def do_atgrids_od_atweights(self):
# od stands for off-diagonal
self.do_atgrids_atweights()
self._prepare_atweights()
pb = log.pb("Computing off-diagonal atom weights", self.molecule.size**2)
for i in xrange(self.molecule.size):
atgrid = self.atgrids[i]
atgrid.od_atweights = []
for j in xrange(self.molecule.size):
pb()
w = self._compute_atweights(atgrid, j)
atgrid.od_atweights.append(w)
pb()
def _compute_atweights(self, grid, atom_index):
raise NotImplementedError
@OnlyOnce("Net and overlap populations")
def do_net_overlap(self):
net_overlap_name = "%s_net_overlap.bin" % self.prefix
self.net_overlap = self.work.load(net_overlap_name, (self.molecule.size,self.molecule.size))
if self.net_overlap is None:
self.do_atgrids()
self.do_atgrids_moldens()
self.do_charges()
self.do_atgrids_od_atweights()
self.net_overlap = numpy.zeros((self.molecule.size, self.molecule.size))
pb = log.pb("Integrating over products of stockholder weights", (self.molecule.size*(self.molecule.size+1))/2)
for i in xrange(self.molecule.size):
for j in xrange(i+1):
pb()
if i != j:
# Use Becke's integration scheme to split the integral
# over two atomic grids.
# 1) first part of the integral, using the grid on atom i
delta = (self.atgrids[i].distances[j].reshape((len(self.rgrid.rs),-1)) - self.rgrid.rs.reshape((-1,1))).ravel()
switch = delta/self.molecule.distance_matrix[i,j]
for k in xrange(3):
switch = (3 - switch**2)*switch/2
switch += 1
switch /= 2
integrand = switch*self.atgrids[i].od_atweights[j]*self.atgrids[i].atweights*self.atgrids[i].moldens
part1 = self._spherint(integrand)
# 2) second part of the integral
delta = (self.atgrids[j].distances[i].reshape((len(self.rgrid.rs),-1)) - self.rgrid.rs.reshape((-1,1))).ravel()
switch = delta/self.molecule.distance_matrix[i,j]
for k in xrange(3):
switch = (3 - switch**2)*switch/2
switch += 1
switch /= 2
integrand = switch*self.atgrids[j].od_atweights[i]*self.atgrids[j].atweights*self.atgrids[j].moldens
part2 = self._spherint(integrand)
# Add up and store
self.net_overlap[i,j] = part1 + part2
self.net_overlap[j,i] = part1 + part2
else:
integrand = self.atgrids[i].atweights**2*self.atgrids[i].moldens
self.net_overlap[i,i] = self._spherint(integrand)
pb()
self.work.dump(net_overlap_name, self.net_overlap)
self.output.dump_atom_matrix("%s_net_overlap.txt" % self.prefix, self.net_overlap, "Net/Overlap")
class StockholderScheme(BaseScheme):
def do_proatomfns(self):
raise NotImplementedError
def _prepare_atweights(self):
self.do_proatomfns()
def _compute_atgrid_atweights(self):
for i in xrange(self.molecule.size):
self.atgrids[i].atweights = self._compute_atweights(
self.atgrids[i], i
)
def _compute_atweights(self, grid, atom_index):
"""Return the weight of atom with given index in the given grid points
"""
# construct the pro-atom and pro-molecule on this grid
pro_atom = self.proatomfns[atom_index](grid.distances[atom_index])
pro_mol = numpy.zeros(len(pro_atom), float)
for j in xrange(self.molecule.size):
pro_mol += self.proatomfns[j](grid.distances[j])
# clip the weight between zero and one
return numpy.clip(pro_atom/pro_mol, 0, 1)
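# The ratio above is the stockholder recipe: w_A(r) = rho_A^pro(r) /
# sum_B rho_B^pro(r). A tiny self-contained sketch with two Gaussian
# pro-atoms on a line (all values invented for illustration):
def _stockholder_weight_example():
    import numpy
    r = numpy.linspace(-3.0, 3.0, 7)
    pro_a = numpy.exp(-(r + 1.0)**2)  # pro-atom A centered at -1
    pro_b = numpy.exp(-(r - 1.0)**2)  # pro-atom B centered at +1
    # close to 1 near atom A, close to 0 near atom B
    return numpy.clip(pro_a/(pro_a + pro_b), 0, 1)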
class TableBaseScheme(StockholderScheme):
@classmethod
def new_from_args(cls, context, args):
if len(args) == 1:
atom_table = AtomTable(args[0])
else:
raise ParseError("The Hirshfeld schemes require one scheme argument.")
return cls(context, atom_table)
def __init__(self, context, extra_tag_attributes, atom_table):
self.atom_table = atom_table
BaseScheme.__init__(self, context, atom_table.rgrid, extra_tag_attributes)
hirshfeld_usage = """ * Hirshfeld Partitioning
scheme = hirsh
scheme parameters = densities.txt
The file densities.txt is generated with the script hi-atomdb.py. It
contains spherically averaged densities of individual atoms. Make sure all
the atoms present in the molecule of interest are included in the file
densities.txt
Hirshfeld, F. L. Theor. Chim. Acta 1977, 44, 129-138.
http://dx.doi.org/10.1007/BF00549096
"""
class HirshfeldScheme(TableBaseScheme):
prefix = "hirsh"
usage = hirshfeld_usage
def __init__(self, context, atom_table):
TableBaseScheme.__init__(self, context, {}, atom_table)
@OnlyOnce("Conventional Hirshfeld (with neutral pro-atoms)")
def do_proatomfns(self):
self.do_atgrids()
self.proatomfns = []
for number in self.molecule.numbers:
self.proatomfns.append(self.atom_table.records[number].get_atom_fn())
hirshfeld_i_usage = """ * Hirshfeld-I Partitioning
scheme = hirshi
scheme parameters = densities.txt
The file densities.txt is generated with the script hi-atomdb.py. It
contains spherically averaged densities of individual atoms. Make sure all
the atoms present in the molecule of interest are included in the file
densities.txt
Bultinck, P.; Van Alsenoy, C.; Ayers, P. W.; Dorca, R. C. J. Chem. Phys.
2007, 126, 144111.
http://dx.doi.org/10.1063/1.2715563
"""
class HirshfeldIScheme(TableBaseScheme):
prefix = "hirshi"
usage = hirshfeld_i_usage
def __init__(self, context, atom_table):
extra_tag_attributes = {
"max_iter": str(context.options.max_iter),
"threshold": "%.5e" % context.options.threshold,
}
TableBaseScheme.__init__(self, context, extra_tag_attributes, atom_table)
@OnlyOnce("Iterative Hirshfeld")
def do_proatomfns(self):
self.do_atgrids_moldens()
counter = 0
old_populations = self.wavefn.nuclear_charges.astype(float)
log("Iteration Max change Total charge")
while True:
# construct the pro-atom density functions, using the densities
# from the previous iteration.
self.proatomfns = []
for i, number_i in enumerate(self.molecule.numbers):
self.proatomfns.append(self.atom_table.records[number_i].get_atom_fn(old_populations[i]))
populations = numpy.zeros(self.molecule.size, float)
for i in xrange(self.molecule.size):
integrand = self.atgrids[i].moldens*self._compute_atweights(self.atgrids[i], i)
population = self._spherint(integrand)
populations[i] = population
# check convergence and log progress
max_change = abs(populations-old_populations).max()
log("%5i % 10.5e % 10.5e" % (
counter, max_change, self.wavefn.nuclear_charges.sum() - populations.sum()
))
if max_change < self.context.options.threshold:
break
counter += 1
if counter > self.context.options.max_iter:
raise RuntimeError("Iterative Hirshfeld failed to converge.")
old_populations = populations
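# The loop above is a plain fixed-point iteration: populations define the
# pro-atoms, which yield new populations, until the largest change drops
# below the threshold. The same pattern in isolation (the update function
# f is a placeholder):
def _fixed_point_sketch(f, start, threshold, max_iter):
    old = start
    for counter in xrange(max_iter):
        new = f(old)
        if abs(new - old).max() < threshold:
            return new
        old = new
    raise RuntimeError("failed to converge")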
isa_usage = """ * Iterative Stockholder Partitioning
scheme = isa
scheme parameters = [r_low r_high steps]
Three additional parameters can be provided if the file rs.bin is not yet
present in the work directory. The first two, r_low and r_high, are the
first and the last point on the logarithmic radial grid in angstrom. The
third, steps, is the number of grid points on the radial grid. The defaults
are 2.0e-5, 20.0 and 100, respectively.
Lillestolen, T. C.; Wheatley, R. J. Chem. Commun. 2008, 5909-5911.
http://dx.doi.org/10.1039/b812691g
"""
class ISAScheme(StockholderScheme):
prefix = "isa"
usage = isa_usage
@classmethod
def new_from_args(cls, context, args):
r_low = 2.0e-5*angstrom
r_high = 20.0*angstrom
steps = 100
if len(args) == 0:
pass
elif len(args) == 3:
r_low = float(args[0])*angstrom
r_high = float(args[1])*angstrom
steps = int(args[2])
else:
raise ParseError("The ISA scheme requires zero or three scheme arguments.")
return cls(context, RLogIntGrid(r_low, r_high, steps))
def __init__(self, context, rgrid):
extra_tag_attributes = {
"max_iter": str(context.options.max_iter),
"threshold": "%.5e" % context.options.threshold,
}
BaseScheme.__init__(self, context, rgrid, extra_tag_attributes)
@OnlyOnce("Iterative Stockholder Analysis")
def do_proatomfns(self):
self.do_atgrids_moldens()
log("Generating initial guess for the pro-atoms")
self.proatomfns = []
for i in xrange(self.molecule.size):
densities = self.atgrids[i].moldens
profile = self.agrid.minimum(densities)
profile[profile < 1e-6] = 1e-6
self.proatomfns.append(CubicSpline(self.rgrid.rs, profile))
counter = 0
old_populations = self.wavefn.nuclear_charges.copy()
log("Iteration Max change Total charge")
while True:
new_proatomfns = []
populations = numpy.zeros(self.molecule.size, float)
for i in xrange(self.molecule.size):
integrand = self.atgrids[i].moldens*self._compute_atweights(self.atgrids[i], i)
radfun = self.agrid.integrate(integrand)
rs = self.rgrid.rs[:len(radfun)]
populations[i] = self.rgrid.integrate(radfun*rs*rs)
# add negligible tails to maintain a complete partitioning
radfun[radfun < 1e-40] = 1e-40
new_proatomfn = CubicSpline(self.rgrid.rs, radfun/(4*numpy.pi))
new_proatomfns.append(new_proatomfn)
# check convergence and log progress
max_change = abs(populations-old_populations).max()
log("%5i % 10.5e % 10.5e" % (
counter, max_change, self.wavefn.nuclear_charges.sum() - populations.sum()
))
if max_change < self.context.options.threshold:
break
counter += 1
if counter > self.context.options.max_iter:
raise RuntimeError("Iterative Stockholder Analysis failed to converge.")
old_populations = populations
self.proatomfns = new_proatomfns
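# Each ISA update replaces a pro-atom by the spherical average of its
# share of the density: rho_pro(r) = (1/(4*pi)) * integral of rho over
# the sphere of radius r. A schematic check on a constant density (the
# toy quadrature below is invented; it only needs to sum to one):
def _spherical_average_sketch():
    import numpy
    lebedev_weights = numpy.ones(26)/26.0   # toy angular weights, sum to 1
    dens_on_sphere = 0.7*numpy.ones(26)     # constant density on the sphere
    radfun = 4*numpy.pi*(lebedev_weights*dens_on_sphere).sum()
    return radfun/(4*numpy.pi)              # recovers 0.7, the average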
becke_usage = """ * Becke's Smooth Voronoi Partitioning
scheme = becke
scheme parameters = [k] [r_low r_high steps]
The parameter k is optional and defaults to 3. It is the number of
iterations in the definition of the weight function in Becke's paper.
Three additional parameters can be provided if the file rs.bin is not yet
present in the work directory. The first two, r_low and r_high, are the
first and the last point on the logarithmic radial grid in angstrom. The
third, steps, is the number of grid points on the radial grid. The defaults
are 2.0e-5, 20.0 and 100, respectively.
Becke, A. D. J. Chem. Phys. 1988, 88, 2547-2553.
http://dx.doi.org/10.1063/1.454033
"""
class BeckeScheme(BaseScheme):
prefix = "becke"
usage = becke_usage
@classmethod
def new_from_args(cls, context, args):
k = 3
r_low = 2.0e-5*angstrom
r_high = 20.0*angstrom
steps = 100
if len(args) == 0:
pass
elif len(args) == 1:
k = int(args[0])
elif len(args) == 3:
r_low = float(args[0])*angstrom
r_high = float(args[1])*angstrom
steps = int(args[2])
elif len(args) == 4:
k = int(args[0])
r_low = float(args[1])*angstrom
r_high = float(args[2])*angstrom
steps = int(args[3])
else:
raise ParseError("The becke scheme requires zero, one, three or four scheme arguments.")
return cls(context, k, RLogIntGrid(r_low, r_high, steps))
def __init__(self, context, k, rgrid):
if k <= 0:
raise ValueError("The parameter k must be strictly positive.")
self.k = k
BaseScheme.__init__(self, context, rgrid, {"becke_k": str(k)})
@OnlyOnce("Becke's Smooth Voronoi Partitioning")
def _prepare_atweights(self):
# Compute the cell functions on all grids
self.do_atgrids()
radii = numpy.array([periodic[n].covalent_radius for n in self.molecule.numbers])
N = self.molecule.size
pb = log.pb("Computing/Loading cell functions", N)
for i in xrange(N):
pb()
# working in the grid of atom i
grid = self.atgrids[i]
# first try to load. if it fails then compute.
grid.cell_functions = grid.load("cell_functions")
if grid.cell_functions is None:
# load failed, so compute
grid.cell_functions = numpy.ones((N, grid.size), float)
for j0 in xrange(N):
for j1 in xrange(j0):
# working on the contribution from atom pair j0,j1
# determine the displacement of the cell boundary with
# respect to the center based on covalent radii
d = self.molecule.distance_matrix[j0,j1]
u = (radii[j0]-radii[j1])/(radii[j1]+radii[j0])
a = u/(u**2-1)
if a < -0.45: a = -0.45
elif a > 0.45: a = 0.45
# construct the switching function
switch = grid.distances[j0].copy()
switch -= grid.distances[j1]
switch /= d
switch = switch + a*(1-switch**2) # hetero
for k in xrange(self.k):
switch = 0.5*(3.0 - switch**2)*switch
switch += 1.0
switch /= 2.0
grid.cell_functions[j0] *= 1-switch
grid.cell_functions[j1] *= switch
# dump cell functions
grid.dump("cell_functions", grid.cell_functions)
else:
grid.cell_functions = grid.cell_functions.reshape((N, -1))
grid.cell_sum = grid.cell_functions.sum(axis=0)
pb()
def _compute_atgrid_atweights(self):
for i in xrange(self.molecule.size):
grid = self.atgrids[i]
grid.atweights = grid.cell_functions[i]/grid.cell_sum
def _compute_atweights(self, grid, atom_index):
"""Return the weight of atom with given index in the given grid points
"""
return grid.cell_functions[atom_index]/grid.cell_sum
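# The clipped parameter a used in _prepare_atweights follows Becke's
# size adjustment: u = (R_i - R_j)/(R_i + R_j) from covalent radii and
# a = u/(u**2 - 1), limited here to |a| <= 0.45 so the shifted boundary
# stays between the two nuclei. A direct transcription for a single pair:
def _becke_size_adjustment_example(r_i, r_j):
    u = (r_i - r_j)/(r_i + r_j)
    a = u/(u**2 - 1)
    return min(max(a, -0.45), 0.45)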
# find all usable Scheme classes
scheme_classes = {}
for x in globals().values():
if isinstance(x, type) and issubclass(x, BaseScheme) and x.prefix is not None:
scheme_classes[x.prefix] = x
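# The registry above allows prefix-based dispatch; a hypothetical driver
# would resolve a scheme name from the options roughly like this:
def _load_scheme_example(context, name, args):
    cls = scheme_classes.get(name)  # e.g. "hirsh", "hirshi", "isa", "becke"
    if cls is None:
        raise ParseError("unknown scheme: %s" % name)
    return cls.new_from_args(context, args)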
|
molmod/hipart
|
hipart/schemes.py
|
Python
|
gpl-3.0
| 44,551
|
[
"CP2K",
"CPMD",
"GAMESS",
"Gaussian"
] |
861484eae3121edb0c80855611e0d6860f06418386d3b8d4e725ca60ca914a53
|
from datetime import datetime
import logging
from time import time
from sqlalchemy.exc import IntegrityError, OperationalError
from sqlalchemy.sql import and_, or_
from .select_or_insert import SelectOrInsert
from .stripXML import strip as stripXML
from .tables import AdmissionSource, SpecimenSource
from .tables import PerformingLab, LabFlag
from .tables import OrderNumber, ReferenceRange
from .tables import AssignedLocation, Race
from .tables import AdmitReason, ChiefComplaint, FluVaccine
from .tables import H1N1Vaccine, AdmissionO2sat
from .tables import AdmissionTemp, Pregnancy, Note
from .tables import LabResult, Location, Visit
from .tables import MessageProcessed, ServiceArea
from .tables import Disposition, VisitLabAssociation
from .tables import Diagnosis, VisitDiagnosisAssociation
from pheme.util.pg_access import AlchemyAccess
from pheme.warehouse.tables import ObservationData, HL7_Nte, FullMessage
from pheme.util.util import getDobDatetime, getYearDiff, inProduction
from pheme.util.util import none_safe_min as min
from pheme.util.util import none_safe_max as max
def pdb_hook():
"""Debugging hook for multi-processing
Using multi-processing requires restoration of stdin/out
Depending on where the hook is set, might consider reducing
longitudinal_manager.NUM_PROCS to 1 to avoid confusion.
"""
import pdb
pdb.Pdb(stdin=open('/dev/stdin', 'r+'),
stdout=open('/dev/stdout', 'r+')).set_trace()
class ClinicalInfo(object):
"""Base class for `clinical information` surrogates
The derived surrogates are used to manage the extraction and
deduplication of any clinical data. This includes patient age,
self-reported vaccination status, pregnancy, body temperature and
oxygen saturation
"""
def __init__(self, result, units):
self.result = stripXML(result)
self.units = units
def obr_index(hl7_obr_id, surrogate_lab_list):
"""Finds the index to the SurrogateLab with the hl7_obr_id
Returns the first match found, raises KeyError otherwise
"""
for index, lab in enumerate(surrogate_lab_list):
    if lab.hl7_obr_id == hl7_obr_id:
        return index
raise KeyError("no lab with hl7_obr_id %d" % hl7_obr_id)
def obx_index(hl7_obx_id, surrogate_lab_list):
"""Finds the index to the SurrogateLab with the hl7_obx_id
raises KeyError if no match, or multiple matches are found
"""
match = None
for index, lab in enumerate(surrogate_lab_list):
    if hl7_obx_id in lab.hl7_obx_ids:
        if match is not None:
raise KeyError("multiple labs with hl7_obx_id %d" %
hl7_obx_id)
match = index
if match is None:
raise KeyError("no lab with hl7_obx_id %d" % hl7_obx_id)
return match
class SurrogatePatientAge(ClinicalInfo):
def associate(self, visit):
"""Link this instance with the given visit """
if not self.units == 'Years':
# Current db schema only holds 'years'. Skip storage if
# in a different unit, allow _calculateAge to take over.
return
visit.visit.age = int(self.result)
class SurrogateInfluenzaVaccine(ClinicalInfo):
def associate(self, visit):
"""Link this instance with the given visit """
flu = FluVaccine(status=self.result)
flu = visit.parent_worker.flu_vaccine_lock.fetch(flu)
visit.visit.dim_flu_vaccine_pk = flu.pk
class SurrogateH1N1Vaccine(ClinicalInfo):
def associate(self, visit):
"""Link this instance with the given visit """
vac = H1N1Vaccine(status=self.result)
vac = visit.parent_worker.h1n1_vaccine_lock.fetch(vac)
visit.visit.dim_h1n1_vaccine_pk = vac.pk
class SurrogateO2Saturation(ClinicalInfo):
def associate(self, visit):
"""Link this instance with the given visit """
if not self.units in ['Percent',
'PercentOxygen[Volume Fraction Units]']:
raise ValueError(self.units)
# Occasionally the percentage comes in with a trailing '.'
# which kills the int cast - chop if present
if self.result.endswith('.'):
self.result = self.result[:-1]
sat = AdmissionO2sat(o2sat_percentage=int(self.result))
sat = visit.parent_worker.admission_o2sat_lock.fetch(sat)
visit.visit.dim_admission_o2sat_pk = sat.pk
class SurrogateBodyTemp(ClinicalInfo):
def associate(self, visit):
"""Link this instance with the given visit """
if not self.units == 'Degree Fahrenheit [Temperature]':
raise ValueError(self.units)
#Natasha has requested we limit the precision to one
#decimal place:
self.result = "%.1f" % float(self.result)
temp = AdmissionTemp(degree_fahrenheit=self.result)
temp = visit.parent_worker.admission_temp_lock.fetch(temp)
visit.visit.dim_admission_temp_pk = temp.pk
class SurrogatePregnancy(ClinicalInfo):
def associate(self, visit):
"""Link this instance with the given visit """
# The pregnancy message uses a 'CE' data type OBX statement,
# which translates to needing the 5.2 portion of the result
# so grab the value between the first and second pipes
segments = self.result.split('|')
prego = Pregnancy(result=segments[1])
prego = visit.parent_worker.pregnancy_lock.fetch(prego)
visit.visit.dim_pregnancy_pk = prego.pk
class SurrogateChiefComplaint(ClinicalInfo):
def associate(self, visit):
"""Link this instance with the given visit """
cc = ChiefComplaint(chief_complaint=self.result)
cc = visit.parent_worker.chief_complaint_lock.fetch(cc)
visit.visit.dim_cc_pk = cc.pk
#TODO: add '43137-9'; Clinical Finding - CONDITION OF INTEREST PRESENT
#TODO: consider other message types for age - all other codes below
# only come in on ORU^R01^ORU_R01 - calculated age does not.
clinical_codes_of_interest = {
'8661-1': SurrogateChiefComplaint,
'29553-5': SurrogatePatientAge,
'46077-4': SurrogateInfluenzaVaccine,
'29544-4': SurrogateH1N1Vaccine,
'20564-1': SurrogateO2Saturation,
'59408-5': SurrogateO2Saturation,
'8310-5': SurrogateBodyTemp,
'11449-6': SurrogatePregnancy,
}
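# The mapping above drives dispatch by LOINC code: each code of interest
# is bound to the surrogate class that knows how to persist its value.
# A self-contained illustration of the same lookup (codes as above):
def _clinical_dispatch_example(test_code, result, units):
    klass = clinical_codes_of_interest.get(test_code)
    if klass is None:
        return None  # not a clinical code of interest
    return klass(result, units)  # e.g. SurrogateBodyTemp for '8310-5'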
class SurrogateDiagnosis(object):
"""A stand-in for each diagnosis built up during deduplication
Diagnosis details are split between two DAO objects, the Diagnosis
itself (icd9 & description), and the assocation which besides
defining the association between the visit and the diagnosis,
contains status & dx_datetime.
This class simplifies adding and merging previously bound
diagnoses with any new ones found during the longitudinal
process.
Each instance is intended to be 'Set' friendly, making it easy to
compare and deduplicate, i.e. they are `hashable` and therefore
immutable.
"""
def __init__(self, rank, icd9, description, status, dx_datetime):
"""Store all the values defining this diagnosis.
NB - these are to be treated as immutable objects, as part of
the `hashable` contract. dx_datetime is not part of the
unique dx definition.
"""
self.rank = rank
self.icd9 = icd9
self.description = description
self.status = status
self.dx_datetime = dx_datetime
# Mark this immutable object as complete
self._initialized = True
def __setattr__(self, name, value):
if hasattr(self, '_initialized'):
raise TypeError("immutable object can't be changed")
else:
object.__setattr__(self, name, value)
def __delattr__(self, name):
raise TypeError("immutable object can't be changed")
def __hash__(self):
"""Calculate and return a hash value for the instance
Only fields in the respective DAO objects query_fields lists
should be considered. Other fields such as dx_datetime would
result in IntegrityErrors as they aren't included in defining
a unique diagnosis.
returns a hash value for this instance
"""
if not hasattr(self, '_hashvalue'):
hv = self.icd9.__hash__() +\
self.status.__hash__()
object.__setattr__(self, '_hashvalue', hv)
return self._hashvalue
def __cmp__(self, other):
return cmp(self.__hash__(), other.__hash__())
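# Because __hash__ and __cmp__ consider only icd9 and status, a set of
# surrogates deduplicates on exactly those fields. A sketch (values are
# invented; rank and dx_datetime deliberately differ):
def _diagnosis_dedup_example():
    a = SurrogateDiagnosis(1, '487.1', 'Influenza', 'F', None)
    b = SurrogateDiagnosis(2, '487.1', 'Influenza', 'F', None)
    return len(set((a, b)))  # -> 1; the duplicates collapse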
class SurrogateLab(object):
"""A stand-in for each lab result built up during deduplication
Lab details are split between several DAO objects, the LabResult
itself (test_code, test_text, coding, result, result_unit) and the
VisitLabAssocation which besides defining the association between
the visit and the lab, contains status, collection_datetime,
report_datetime and a number of related dimensional table foreign
keys pointing to a LabFlag, OrderNumber, ReferenceRange, Note,
PerformingLab and SpecimenSource
This class simplifies adding and merging previously bound labs
with any new ones found during the longitudinal process. Only
fields in the respective DAO `query_fields` tuples are considered
in the comparison methods - so the first `unique` value will be
kept. For example, a new `report_datetime` is not part of the
unique diagnosis definition, so a second lab differing only on
such a field will be ignored.
Each instance is intended to be 'Set' friendly, making it easy to
compare and deduplicate, i.e. they are `hashable` and therefore
immutable - note the `append_result` exception.
"""
MAX_RESULT_LEN = 500
def __init__(self, test_code, test_text, coding, result, units,
status, lab_flag, specimen_source, performing_lab,
order_number, reference_range,
collection_datetime, report_datetime,
hl7_obr_id=None, hl7_obx_id=None):
"""Store all the values defining this lab result.
these are to be treated as immutable objects, as part of
the `hashable` contract. `report_datetime` is not part of the
unique lab definition, nor are the foreign key associations
{specimen_source, performing_lab, order_number,
reference_range, lab_flag, note}.
The `append_result` method sidesteps the immutable contract,
but does raise an exception if results are appended AFTER a
call to __hash__ has been made - so make sure the object is
complete before doing any sorting or inserting in sorted
container types.
As notes are collected as a later step, and don't affect
unique checks or sorting, the note property is also an
exception to the immutable contract, and not part of this
initialization.
"""
self.test_code = test_code
self.test_text = test_text
self.coding = coding
self.result = result
self.units = units
self.status = status
self.lab_flag = lab_flag
self.performing_lab = \
PerformingLab(local_code=performing_lab)\
if performing_lab else None
self.specimen_source =\
SpecimenSource(source=specimen_source)\
if specimen_source else None
self.order_number = \
OrderNumber(filler_order_no=order_number)\
if order_number else None
self.reference_range = \
ReferenceRange(range=reference_range)\
if reference_range else None
self.collection_datetime = collection_datetime
self.report_datetime = report_datetime
self.hl7_obr_id = hl7_obr_id
self.hl7_obx_ids = [hl7_obx_id, ] if hl7_obx_id else None
self.note = None
# Mark this immutable object as complete
self._initialized = True
@classmethod
def from_VisitLabAssociation(cls, vla):
"""SurrogateLab factory method from VisitLabAssociation"""
ss = vla.specimen_source.source if \
vla.dim_specimen_source_pk else None
pl = vla.performing_lab.local_code if \
vla.dim_performing_lab_pk else None
on = vla.order_number.filler_order_no if \
vla.dim_order_number_pk else None
rr = vla.reference_range.range if \
vla.dim_ref_range_pk else None
return cls(test_code=vla.lab.test_code,
test_text=vla.lab.test_text,
coding=vla.lab.coding,
result=vla.lab.result,
units=vla.lab.result_unit,
status=vla.status,
collection_datetime=vla.collection_datetime,
report_datetime=vla.report_datetime,
specimen_source=ss,
performing_lab=pl,
order_number=on,
reference_range=rr,
lab_flag=vla.lab_flag,)
def append_result(self, result, hl7_obx_id):
"""This method is an exception to the immutable object
contract, allowing the user to continue to build up the result
after object creation.
NB - this may only be used prior to any calls to
`self.__hash__`, so make sure the object is complete before
adding to a sorted container.
Note also, the database limit of MAX_RESULT_LEN character
length for this field is adhered to here - dropping anything
beyond the max result length in the bit bucket.
:param result: the additional result to append to the result
thus far.
:param hl7_obx_id: the datawarehouse hl7_obx_id the result
came from - necessary for potential note
associations
"""
if hasattr(self, '_hashvalue'):
raise TypeError("append_result can't be called after "\
"__hash__")
id_list = self.hl7_obx_ids + [hl7_obx_id, ]
object.__setattr__(self, 'hl7_obx_ids', id_list)
if result is None:
return
if self.result is not None:
new_result = (" ".join((self.result,
result)))[:self.MAX_RESULT_LEN]
else:
new_result = result[:self.MAX_RESULT_LEN]
object.__setattr__(self, 'result', new_result)
def set_note(self, value):
"""Notes are looked up as a second step, and are not part of
the unique contract, so this setter doesn't abide by the
immutable contract.
"""
if value is not None and len(value):
note = Note(note=value)
object.__setattr__(self, 'note', note)
else:
object.__setattr__(self, 'note', None)
def __setattr__(self, name, value):
if hasattr(self, '_initialized'):
raise TypeError("immutable object can't be changed")
else:
if name == 'result' and value is not None:
value = value[:self.MAX_RESULT_LEN]
object.__setattr__(self, name, value)
def __delattr__(self, name):
raise TypeError("immutable object can't be changed")
def __hash__(self):
"""Calculate and return a hash value for the instance
Only fields in the respective DAO objects query_fields lists
should be considered. Other fields such as report_datetime
would result in IntegrityErrors as they aren't included in
defining a unique lab result.
NB - we're overlooking at the moment that two identical
labs that differ by one of the other dimensions (note,
specimen_source, lab_flag, reference_range) won't create
unique rows.
returns a hash value for this instance
"""
if not hasattr(self, '_hashvalue'):
hv = self.test_code.__hash__() +\
self.test_text.__hash__() +\
self.coding.__hash__() +\
self.result.__hash__() +\
self.units.__hash__() +\
self.status.__hash__()
object.__setattr__(self, '_hashvalue', hv)
return self._hashvalue
def __cmp__(self, other):
return cmp(self.__hash__(), other.__hash__())
class SurrogateVisit(object):
""" Surrogate visits built up during the longitudinal process
During the process of generating the longitudinal visit for a
particular visit_id, a surrogate visit for each defined patient
class is used to gather the best data, and house the more
complicated logic to determine what should be kept, updated and
ignored.
A parent_worker attribute is maintained so the surrogate can
access attributes of the owning longitudinal worker, such as the
table locks needed when looking up existing records.
"""
def __init__(self, parent_worker, visit):
"""Handles a number of tricky related values via properties
:param parent_worker: The longitudinal_worker that
instantiated this instance. Used to reference contained
locks, etc.
:param visit: The visit instance representing the DBO
"""
self.parent_worker = parent_worker
self.visit = visit
self._admission_source = None
self._assigned_location = None
self._admit_reason = None
self._chief_complaint = None
self._disposition = None
self._location = None
self._service_area = None
self._diagnoses = set()
self._labs = set()
self._clinical_info = {}
self._race = None
def _get_admission_source(self):
return self._admission_source
def _set_admission_source(self, admission_source):
"""Stores the new admission_source provided
Simply keeps the latest, provided it has a value
:param admission_source: the `admission_source` (code)
directly from the HL7 message (PV1.14.1). These are really
lookup values, the description for each lives in the
dim_admission_source table.
"""
assert(admission_source and admission_source.strip())
self._admission_source = AdmissionSource(pk=admission_source)
admission_source = property(_get_admission_source, _set_admission_source)
def associate_admission_source(self):
"""Bind the visit DAO with the admission_source, if set """
if not self.admission_source:
return
a_s = self.parent_worker.admission_source_lock.\
fetch(self.admission_source)
self.visit.dim_admission_source_pk = a_s.pk
def _get_assigned_location(self):
return self._assigned_location
def _set_assigned_location(self, location):
"""Stores the new assigned_location provided
Simply keeps the latest, provided it has a value
This method (and _set_service_area) also manage the
'ever_in_icu' logic, as the qualifying assigned_location may
change. If we ever see a qualifying value, set ever_in_icu
to True. Its default value of False handles the obvious case, and
it is never set back to False, so a value once seen is not lost.
:param assigned_location: the `assigned_patient_location`
directly from the HL7 message (PV1.3.1).
"""
assert(location and location.strip())
if location.endswith('ICU') or location.endswith('ACU') or \
location in ('ACUI',):
self.visit.ever_in_icu = True
self._assigned_location = AssignedLocation(location=location)
assigned_location = property(_get_assigned_location,
_set_assigned_location)
def establish_associations(self):
"""Establish any associations available with this visit
During the longitudinal process, many attributes on this
surrogate visit class were set. Those all need to be
associated, typically by setting a foreign key value on the
visit DAO instance.
A number of 'associate_*' methods are defined for this class.
This method looks up all callable attributes starting with the
'associate' string, and calls them in turn.
We also keep track of any 'related changes', that is, changes
to the visit, that aren't made on the visit DAO itself, as a
timestamp is kept current for any changes made. The associate
tables are such an example, where any new rows in an associate
table including this visit's foreign key would count as a
change on this instance. NB - it is the obligation of the
associate methods to return True in such a case.
returns True if any changes were made to the visit object that
aren't done to the visit DOA itself.
"""
changed = []
for attr in dir(self):
if attr.startswith('associate'):
method = getattr(self, attr)
if callable(method):
changed.append(method())
return any(changed)
def associate_assigned_location(self):
"""Bind the visit DAO with the assigned_location, if set """
if not self.assigned_location:
return
al = self.parent_worker.assigned_location_lock.\
fetch(self.assigned_location)
self.visit.dim_assigned_location_pk = al.pk
def _get_admit_reason(self):
return self._admit_reason
def _set_admit_reason(self, admit_reason):
"""Stores the new admit_reason provided
Simply keeps the latest, provided it has a value
:param admit_reason: the `admit_reason` (code) directly from the
HL7 message (PV2.3.2 or PV2.3.5).
"""
assert(admit_reason and admit_reason.strip())
self._admit_reason = AdmitReason(admit_reason=admit_reason)
admit_reason = property(_get_admit_reason, _set_admit_reason)
def associate_admit_reason(self):
"""Bind the visit DAO with the admit_reason, if set """
if not self.admit_reason:
return
ar = self.parent_worker.admit_reason_lock.\
fetch(self.admit_reason)
self.visit.dim_ar_pk = ar.pk
def _get_chief_complaint(self):
return self._chief_complaint
def _set_chief_complaint(self, chief_complaint):
"""Stores the new chief_complaint provided
Simply keeps the latest, provided it has a value
:param chief_complaint: the `chief_complaint` (code) directly from the
HL7 message (PV2.3.2 or PV2.3.5).
"""
assert(chief_complaint and chief_complaint.strip())
self._chief_complaint = ChiefComplaint(chief_complaint=chief_complaint)
chief_complaint = property(_get_chief_complaint, _set_chief_complaint)
def associate_chief_complaint(self):
"""Bind the visit DAO with the chief_complaint, if set """
if not self.chief_complaint:
return
cc = self.parent_worker.chief_complaint_lock.\
fetch(self.chief_complaint)
self.visit.dim_cc_pk = cc.pk
def add_clinical_info(self, test_code, result, units):
"""Add clinical info to the visit
There are a number of LOINC codes considered to be of
interest, generally with their own dimension table in the
database for persistance. This SurrogateVisit instance
maintains a dictionary of all the clinical info found during
the longitudinal process. Keyed by the LOINC code, with an
instance of `ClinicalInfo` for each one found.
If there already exists an entry for the LOINC code for this
SurrogateVisit, ignore the new data - no updates kept for
clinical information.
"""
if test_code in self._clinical_info:
return
if result is None or len(result.strip()) == 0:
return
klass = clinical_codes_of_interest.get(test_code)
if klass is None:
    # not a clinical code of interest
    return
self._clinical_info[test_code] = klass(result, units)
def associate_clinical_info(self):
"""Create associations for new clinical info with the visit
Each value in the `self._clinical_info` dictionary points to
an instance of a class knowing how to link itself.
"""
for ci in self._clinical_info.values():
ci.associate(self)
def _get_diagnoses(self):
return self._diagnoses
diagnoses = property(_get_diagnoses)
def add_diagnosis(self, rank, icd9, description, status,
dx_datetime):
"""Stores the new diagnosis provided
Add this diagnosis to the list, unless we already appear to
have the same (icd9, description, status). NB, the schema
design breaks status & dx_datetime apart from icd9 & description.
NB - self._diagnoses is a set of SurrogateDiagnosis instances,
deduplicated via their hash contract.
:param icd9: the diagnosis (ICD-9) code directly from the
HL7 message.
"""
assert(icd9 and icd9.strip())
# use a set to control duplicate entries
self._diagnoses.add(SurrogateDiagnosis(rank=rank,
icd9=icd9,
description=description,
status=status,
dx_datetime=dx_datetime))
def associate_diagnoses(self):
"""Bind any new diagnoses with the visit
Load in any existing diagnoses associations, add in only new
ones that didn't previously exist.
returns True if any new associations were persisted to the
database.
"""
if not self._diagnoses:
return False
# First load in any existing, to avoid adding duplicates
existing = self.parent_worker.data_mart.session.\
query(VisitDiagnosisAssociation).\
filter(VisitDiagnosisAssociation.fact_visit_pk ==
self.visit.pk)
existing_set = set()
for d in existing:
existing_set.add(SurrogateDiagnosis(
rank=d.rank,
icd9=d.dx.icd9,
description=d.dx.description,
status=d.status,
dx_datetime=d.dx_datetime))
new_ones = self._diagnoses - existing_set
new_associations = []
for diagnosis in new_ones:
diag_part = Diagnosis(icd9=diagnosis.icd9,
description=diagnosis.description)
d = self.parent_worker.diagnosis_lock.fetch(diag_part)
new_associations.append(VisitDiagnosisAssociation(\
fact_visit_pk=self.visit.pk,
dim_dx_pk=d.pk,
rank=diagnosis.rank,
status=diagnosis.status,
dx_datetime=diagnosis.dx_datetime))
self.parent_worker.data_mart.session.add_all(new_associations)
self.parent_worker.data_mart.session.commit()
if new_associations:
return True
return False
def _get_disposition(self):
return self._disposition
def _set_disposition(self, disposition):
"""Stores the new disposition provided
Simply keeps the latest, provided it has a value
:param disposition: the `disposition` (code) directly from the
HL7 message (PV1.36).
"""
assert(disposition and disposition.strip())
self._disposition = Disposition(code=disposition)
disposition = property(_get_disposition, _set_disposition)
def associate_disposition(self):
"""Bind the visit DAO with the disposition, if set """
if not self.disposition:
return
d = self.parent_worker.disposition_lock.fetch(self.disposition)
self.visit.dim_disposition_pk = d.code
def _get_labs(self):
return self._labs
def _set_labs(self, labs):
"""Stores the list of new labs provided
Deduplicates the list of labs, and hangs onto for
association. It is expected this method will be called zero
or one time per SurrogateVisit - an exception is raised if
called when labs have already been set.
:param labs: an ordered list of SurrogateLab instances
"""
if self._labs:
raise ValueError("labs already set")
# as self._labs is a set, addition in order will control
# duplicates and retain the ones we want.
for lab in labs:
self._labs.add(lab)
labs = property(_get_labs, _set_labs)
def associate_labs(self):
"""Bind any new labs with the visit
Load in any existing lab associations, add in only new
ones that didn't previously exist.
returns True if any new associations were persisted to the
database.
"""
if not self._labs:
return False
# First load in any existing, to avoid adding duplicates
existing = self.parent_worker.data_mart.session.\
query(VisitLabAssociation).\
filter(VisitLabAssociation.fact_visit_pk ==
self.visit.pk)
existing_set = set()
for e in existing:
existing_set.add(SurrogateLab.from_VisitLabAssociation(e))
new_ones = self._labs - existing_set
new_associations = []
for lab in new_ones:
result_part = LabResult(test_code=lab.test_code,
test_text=lab.test_text,
coding=lab.coding,
result=lab.result,
result_unit=lab.units)
r = self.parent_worker.lab_result_lock.fetch(result_part)
if lab.lab_flag:
lf = self.parent_worker.lab_flag_lock.\
fetch(lab.lab_flag)
lf_pk = lf.pk
else:
lf_pk = None
if lab.performing_lab:
pl = self.parent_worker.performing_lab_lock.\
fetch(lab.performing_lab)
pl_pk = pl.pk
else:
pl_pk = None
if lab.specimen_source:
ss = self.parent_worker.specimen_source_lock.\
fetch(lab.specimen_source)
ss_pk = ss.pk
else:
ss_pk = None
if lab.order_number:
on = self.parent_worker.order_number_lock.\
fetch(lab.order_number)
on_pk = on.pk
else:
on_pk = None
if lab.reference_range:
rr = self.parent_worker.reference_range_lock.\
fetch(lab.reference_range)
rr_pk = rr.pk
else:
rr_pk = None
if lab.note:
note = self.parent_worker.note_lock.\
fetch(lab.note)
note_pk = note.pk
else:
note_pk = None
new_associations.append(VisitLabAssociation(\
fact_visit_pk=self.visit.pk,
dim_lab_result_pk=r.pk,
status=lab.status,
collection_datetime=lab.collection_datetime,
report_datetime=lab.report_datetime,
dim_lab_flag_pk=lf_pk,
dim_specimen_source_pk=ss_pk,
dim_performing_lab_pk=pl_pk,
dim_order_number_pk=on_pk,
dim_ref_range_pk=rr_pk,
dim_note_pk=note_pk))
self.parent_worker.data_mart.session.add_all(new_associations)
self.parent_worker.data_mart.session.commit()
if new_associations:
return True
return False
def _get_location(self):
return self._location
def _set_location(self, location):
"""Stores the new location provided, letting the old drop in
the bit bucket.
NB - we don't update locations, as they tend to change, and
the old and new don't necessarily have any relation. Just
keep the latest.
:param location: a prepared `Location` instance, not
necessarily persisted.
"""
self._location = location
location = property(_get_location, _set_location)
def associate_location(self):
"""Bind the visit DAO with the location, if set """
if not self.location:
return
loc = self.parent_worker.location_lock.fetch(self.location)
self.visit.dim_location_pk = loc.pk
def _get_race(self):
return self._race
def _set_race(self, race):
"""Stores the new race/ethnicity provided, letting the old
drop in the bit bucket.
:param race: the race or ethnicity string from whichever
value in the HL7 PID field was valid (PID.22.2
or PID.10.2)
"""
self._race = Race(race=race)
race = property(_get_race, _set_race)
def associate_race(self):
"""Bind the visit DAO with the race, if set """
if not self.race:
return
race = self.parent_worker.race_lock.fetch(self.race)
self.visit.dim_race_pk = race.pk
def _get_service_area(self):
return self._service_area
def _set_service_area(self, service_area):
"""Stores the new service_area provided
Simply keeps the latest, provided it has a value
This method (and _set_assigned_location) also manage the
'ever_in_icu' logic, as the qualifying assigned_location may
change. If we ever see a qualifying value, set ever_in_icu
to True. Its default value of False handles the obvious case, and
it is never set back to False, so a value once seen is not lost.
:param service_area: the `service_code` directly from the HL7
message (PV1.10.1).
"""
assert(service_area and service_area.strip())
if service_area in ('INT', 'PIN'):
self.visit.ever_in_icu = True
self._service_area = ServiceArea(area=service_area)
service_area = property(_get_service_area, _set_service_area)
def associate_service_area(self):
"""Bind the visit DAO with the service_area, if set """
if not self.service_area:
return
sa = self.parent_worker.service_area_lock.fetch(self.service_area)
self.visit.dim_service_area_pk = sa.pk
class ObxSequence(object):
"""Type for inconsistent sequence types in OBX-4.1
The OBX sub-id (hl7_obx.sequence) can be None, integer or floating
point. Data type to store and handle comparison in calculating if
it's time for an increment in NextLabState
"""
def __init__(self, sequence=None):
self._set_seq(sequence)
def _get_seq(self):
return self.__seq
# Treat sequence as a read only property - don't expose setter
sequence = property(_get_seq)
def reset(self):
self.__seq = None
def _set_seq(self, sequence):
this_sequence = sequence.strip() if sequence else None
if this_sequence:
dot_index = this_sequence.find('.')
if dot_index > 0:
self.__whole = int(this_sequence[0:dot_index])
self.__frac = int(this_sequence[dot_index + 1:])
else:
self.__whole = int(this_sequence)
self.__frac = None
self.__seq = this_sequence
def in_sequence_with(self, other):
"""Compare this with another - only supports same type
This method checks to see if 'other' might be in_sequence_with
'self'. Two cases in which this will happen:
* Both self and other have the same whole value, and the
fractional part of other is greater than that of self
(i.e. 1.1 followed by 1.2)
* Both self and other have the same, non zero, fractional
value, and the whole part of other is greater than that of
self (i.e. 1.1 followed by 2.1)
:param other: the other sequence to compare self to, does
other look to be in sequence with self.
returns True if other appears to be in sequence with self,
False otherwise.
"""
if not isinstance(other, ObxSequence):
raise ValueError("comparison of non ObxSequence not "
"supported")
result = False
if self.__seq and other.__seq:
# Look for 1.1 -> 1.2 case
if self.__whole == other.__whole and\
self.__frac and other.__frac and\
self.__frac < other.__frac:
result = True
# Look for 1.1 -> 2.1 case
elif self.__whole < other.__whole and\
self.__frac and other.__frac and\
self.__frac == other.__frac:
result = True
return result
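# Concrete cases for the two continuation patterns described above
# (sequence strings as they would appear in OBX-4.1):
def _obx_sequence_example():
    assert ObxSequence('1.1').in_sequence_with(ObxSequence('1.2'))
    assert ObxSequence('1.1').in_sequence_with(ObxSequence('2.1'))
    assert not ObxSequence('1.1').in_sequence_with(ObxSequence('1'))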
class NextLabState(object):
"""State machine to determine when to start the next lab
The task of breaking out individual labs as they come in is
messy. This class manages state info, implementing a number of
transition methods to determine when a new lab should be
generated, and when the previous needs more data.
The logic for separation of labs includes:
1. next obr
2. next obx within an obr without a defined sequence
3. next obx within an obr with a new significant value
in the sequence, i.e. 1.1 followed by 2.1
4. next obx within an obr with a non-increasing value
in the sequence, i.e. 1.1 followed by 1
"""
def __init__(self):
"""Reset internal state"""
self.__active_index = 0
self.__active_lab_set = False
self.__last_sequence = ObxSequence()
def _get_active_index(self):
return self.__active_index
def __bump_active_index(self):
self.__active_index += 1
self.__active_lab_set = False
self.__last_sequence.reset()
# Treat index as a read only property - don't expose setter
index = property(_get_active_index)
def transition_new_obr(self):
"""Call any time a new obr is found"""
if self.__active_lab_set:
self.__bump_active_index()
def transition_new_obx(self, sequence, code):
"""Call any time a new obx is found
Determine if it's time to bump the active index.
:param sequence: The OBX-4.1 sub-ID field. Typically None,
an integer or a 1.1 format float.
:param code: The loinc or local code - if it changed since
last known, it's bump time regardless of the sequence.
"""
this_sequence = ObxSequence(sequence)
if self.__active_lab_set:
if self.__last_code != code:
self.__bump_active_index()
elif not self.__last_sequence.in_sequence_with(this_sequence):
self.__bump_active_index()
self.__last_sequence = this_sequence
# Call here implies a new lab is being added
self.__active_lab_set = True
self.__last_code = code
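# A short trace of the state machine (codes and sequences invented):
def _next_lab_state_example():
    state = NextLabState()
    state.transition_new_obx('1.1', '94500-6')  # first OBX -> index 0
    state.transition_new_obx('1.2', '94500-6')  # continuation, still 0
    state.transition_new_obx('2.1', '94500-6')  # breaks the sequence -> 1
    state.transition_new_obx('2.2', '80382-5')  # code changed -> 2
    return state.index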
def _preferred_lab_data(obr, obx):
"""Function to pick the best code, text & coding for lab
A lab result consists of the observation request (OBR) and the
observation result (OBX). In both, there exists both a preferred
coding system, such as LOINC, and an alternative, such as
local.
We prefer the OBX codes, if non null, and within prefer the
standardized coding system over local. If nothing is
available in the OBX, default to the OBR, again preferring
standardized codes.
The evaluation is done on the codes themselves, returning the
matching text and coding system. Therefore a null text may be
returned even if there was a defined text on a less favorable
group, if the more favorable has a defined code.
:param obr: Observation Request (HL7_Obr) for this lab
:param obx: Observation Result (HL7_Obx) for this lab
returns the list (preferred_code, preferred_text, coding)
"""
# Using only the code to determine what's available,
# start with OBX preferred coding and move on down
if obx.observation_id:
code = obx.observation_id
text = obx.observation_text if obx.observation_text\
else None
coding = obx.coding if obx.coding else None
elif obx.alt_id:
code = obx.alt_id
text = obx.alt_text if obx.alt_text else None
coding = obx.alt_coding if obx.alt_coding else None
elif obr.loinc_code:
code = obr.loinc_code
text = obr.loinc_text if obr.loinc_text else None
coding = obr.coding if obr.coding else None
elif obr.alt_code:
code = obr.alt_code
text = obr.alt_text if obr.alt_text else None
coding = obr.alt_coding if obr.alt_coding else None
else:
raise ValueError("no valid codes found for OBX or OBR")
return (code, text, coding)
def _preferred_lab_flag(obx):
"""Grabs the preferred lab flag data from the obx row
Lab flags include an identifier, text and coding. There is both a
preferred and an alternate set of each. There may also be none of
the above defined in the source HL7 obx segment.
Given the HL7_Obx row data, extract the best lab flag data
available. If no data is found, return None. Otherwise, return
the best `LabFlag` available
"""
if not any((obx.abnorm_id, obx.abnorm_text, obx.alt_abnorm_id,
obx.alt_abnorm_text)):
return None
code, text, coding = None, None, None
if obx.abnorm_id or obx.abnorm_text:
code = obx.abnorm_id
text = obx.abnorm_text
coding = obx.abnorm_coding
else:
code = obx.alt_abnorm_id
text = obx.alt_abnorm_text
coding = obx.alt_abnorm_coding
return LabFlag(code=code, code_text=text, coding=coding)
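# _preferred_lab_data falls back in the order: OBX standard coding, OBX
# alternate, OBR standard, OBR alternate. A stub-based sketch (the Stub
# class and all field values are invented for the example):
def _preferred_lab_data_example():
    class Stub(object):
        def __init__(self, **kw):
            self.__dict__.update(kw)
    obr = Stub(loinc_code='600-7', loinc_text='Culture', coding='LN',
               alt_code=None, alt_text=None, alt_coding=None)
    obx = Stub(observation_id=None, alt_id='BC', alt_text='Blood culture',
               alt_coding='L')
    return _preferred_lab_data(obr, obx)  # -> ('BC', 'Blood culture', 'L')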
class LongitudinalWorker(object):
""" Deduplicate a visit.
Does actual processing for a visit. This class is designed to be
run concurrently with any number of like workers, in a
multi-process environment. Multi-threaded proved to be a very
expensive thrashing experiment due to python's GIL (global
interpreter lock). Running as separate processes, we sidestep the
GIL bottleneck. The real win is that each process has its own
database connection, so time spent waiting on the db gives the
other processes time to execute.
"""
def __init__(self, queue, procNumber, data_warehouse, data_mart,
table_locks={}, dbHost='localhost', dbUser=None,
dbPass=None, mart_port=5432, warehouse_port=5432,
verbosity=0):
self.data_warehouse = AlchemyAccess(database=data_warehouse,
port=warehouse_port,
host=dbHost, user=dbUser,
password=dbPass)
self.data_mart = AlchemyAccess(database=data_mart,
host=dbHost, port=mart_port,
user=dbUser, password=dbPass)
self.queue = queue
self.name = 'worker-%d' % procNumber
self.verbosity = verbosity
# Instantiate a SelectOrInsert tool for each provided lock,
# named for the table it's protecting.
# See `longitudinal_manager` for nomenclature
for table, lock in table_locks.items():
setattr(self, table,
SelectOrInsert(lock, self.data_mart.session))
if self.queue:
logging.info("%s: launching", self.name)
self.run()
def run(self):
while True:
startTime = time()
# Grab an available visit_id off the queue
visit_id = self.queue.get()
try:
self.dedupVisit(visit_id)
logging.debug("%s: Merged %s in %s seconds", self.name,
visit_id, time() - startTime)
# Every 100 visits log what's left
whats_left = self.queue.qsize()
if whats_left and whats_left % 100 == 0:
logging.info("%d visits yet to process", whats_left)
except IntegrityError, i:
logging.exception("%s: CRITICAL IntegrityError "
"caught on visit %s : %s",
self.name, visit_id, i)
# rollback the transaction - otherwise this worker is
# left with a useless session
logging.info("%s: Rolling back visit %s",
self.name, visit_id)
self.data_mart.session.rollback()
except OperationalError, i:
logging.exception("%s: CRITICAL OperationalError "
"caught on visit %s : %s",
self.name, visit_id, i)
# rollback the transaction - otherwise this worker is
# left with a useless session
logging.info("%s: Rolling back visit %s",
self.name, visit_id)
self.data_mart.session.rollback()
except Exception, e:
logging.exception("%s: CRITICAL Exception caught on "\
"visit %s : %s",
self.name, visit_id, e)
if not inProduction():
raise e
else:
self.data_mart.session.rollback()
finally:
# Mark this one done in the queue regardless of
# success so we don't hang the process - it doesn't
# get marked done in the db unless it did complete, so
# it'll continue to get picked up next run till the
# error is addressed.
self.queue.task_done()
# Clean up if we appear to be done.
if self.queue.empty():
self.tearDown()
def tearDown(self):
"""tearDown this worker, free resources peacefully
The manager should call this once the queue is empty so
open connections can be peacefully shutdown.
"""
self.data_warehouse.disconnect()
self.data_mart.disconnect()
logging.info("%s: tearing down", self.name)
def _handle_new_visit(self, message):
"""Local helper to handle a new visit
Adds the visit to self._surrogates keyed by patient_class.
It is the caller's responsibility to persist the object.
:param message: The HL7 message being processed, evidently the
first for this (visit_id, patient_class).
returns the new visit, also set in self._surrogates[pc]
"""
v = message.visit
visit = Visit(visit_id=v.visit_id,
patient_class=v.patient_class,
patient_id=v.patient_id,
admit_datetime=v.admit_datetime,
first_message=message.message_datetime,
last_message=message.message_datetime,
dim_facility_pk=message.facility)
self._set_surrogate(v.patient_class, visit)
return visit
def _commit_visit(self, visit, forceUpdate=False):
"""Persist the deduplicated visit if necessary
Roundtrip is skipped unless forced or necessary.
:param visit: The Visit with all the merged / updated and
related values set.
        :param forceUpdate: Force a commit when associated tables get
        updates, so the last_updated value is still maintained.
"""
if forceUpdate or self.data_mart.session.\
is_modified(visit, include_collections=True,
passive=True):
visit.last_updated = datetime.now()
self.data_mart.session.commit()
logging.info("%s: commit merged ER visit %s with "\
"admit_datetime %s", self.name,
visit.visit_id, visit.admit_datetime)
else:
logging.debug("%s: skipped commit(), '%s' doesn't look "\
"dirty", self.name, visit.visit_id)
def _new_labs(self, query):
"""Local helper used to filter and chunk labs
        Labs are messy coming in; each OBR can have any number of OBX
        statements, which may or may not define new replies. This
        method takes the prebuilt query and chunks up the labs using
        the rules within - essentially relying on the value of
        obx.sequence to define continuation or a new result (lab).
        Labs also have associated segments, such as specimen source
        (SPM) and notes (NTE). The latter can be split over
        multiple segments, and associated either with the OBR or OBX.
        The other task here is to filter out non-lab data, such as
        clinical information.
:param query: prepared sqlalchemy query to return the list of
`ObservationData` objects, containing obx and associated obr
messages for consideration
returns list containing a SurrogateLab for each lab result
defined. No checks are done within to assure these labs
        haven't already been linked, but the order should be intact so
        the first in the list are the first defined.
"""
new_labs = list()
# Use a NextLabState instance to manage the new_labs index.
# See `NextLabState` for increment logic.
transition_tool = NextLabState()
for observation in query:
transition_tool.transition_new_obr()
for obx in observation.obxes:
# Prefer the obx values; fall back to obr
best_code, best_text, coding =\
_preferred_lab_data(observation, obx)
result = stripXML(obx.observation_result)
transition_tool.transition_new_obx(sequence=obx.sequence,
code=best_code)
if len(new_labs) == transition_tool.index:
# Dealing w/ new lab, populate what we know.
code = best_code
text = best_text
collection_dt = observation.observation_datetime
report_dt = observation.report_datetime
if observation.status == 'A':
assert(obx.result_status == 'A' or\
obx.result_status is None)
lab_flag = _preferred_lab_flag(obx)
new_labs.append(
SurrogateLab(
test_code=code,
test_text=text,
coding=coding,
result=result,
units=obx.units,
status=observation.status,
collection_datetime=collection_dt,
report_datetime=report_dt,
lab_flag=lab_flag,
specimen_source=observation.specimen_source,
performing_lab=obx.performing_lab_code,
order_number=observation.filler_order_no,
reference_range=obx.reference_range,
hl7_obr_id=observation.hl7_obr_id,
hl7_obx_id=obx.hl7_obx_id))
else:
# Confirm we didn't walk off the end
assert(transition_tool.index == len(new_labs) - 1)
                    # Continuation of lab - concatenate this result
new_labs[transition_tool.index].append_result(
result=result, hl7_obx_id=obx.hl7_obx_id)
        # Now fetch and re-associate notes
self._associate_notes(new_labs)
return new_labs
def _associate_notes(self, labs):
"""Helper to lookup and associate any available notes for labs
Due to the nature of notes (HL7 NTE segments), the data
warehouse associations are maintained by using either
hl7_obr_ids or hl7_obx_ids as foreign keys. This complexity
is necessary as a single HL7 message containing a number of
observation results may have notes associated with the
observation request (HL7 OBR segment) and/or any number of the
observation results (HL7 OBX segments), each of which may span
multiple segments itself.
        This method queries the data warehouse for any available note
associations, and pushes the results back into the
SurrogateLabs provided.
:param labs: list of SurrogateLab objects potentially needing
notes. Modified if any related notes are found.
"""
hl7_obr_ids = [lab.hl7_obr_id for lab in labs]
hl7_obx_ids = [i for lab in labs for i in lab.hl7_obx_ids]
sq = self.data_warehouse.session.query
query = sq(HL7_Nte).\
filter(or_(HL7_Nte.hl7_obr_id.in_(hl7_obr_ids),
HL7_Nte.hl7_obx_id.in_(hl7_obx_ids))).\
order_by(HL7_Nte.hl7_obr_id,
HL7_Nte.hl7_obx_id,
HL7_Nte.sequence_number)
        # Build up notes from potential set of segments, maintaining
        # same mapping index key as used in id_map
found_notes = dict()
for note_segment in query:
if note_segment.hl7_obx_id is not None:
index = obx_index(note_segment.hl7_obx_id, labs)
else:
index = obr_index(note_segment.hl7_obr_id, labs)
if index in found_notes:
found_notes[index].append(note_segment.note)
else:
found_notes[index] = [note_segment.note, ]
        # Push the note associations back into the labs
for index, note_list in found_notes.items():
note = ' '.join([n for n in note_list if n])
if note:
labs[index].set_note(note)
def _add_observations(self, observation_messages):
"""Local helper to add related observations data to surrogates
:param observation_messages: list of new messages for
consideration
"""
msh_ids = [m.hl7_msh_id for m in observation_messages]
sq = self.data_warehouse.session.query
# LOINC '43140-3' == "CLINICAL FINDING PRESENT" - not lab data
# Remember SQL null handling is odd, loinc_code != 'x'
# excludes undefined loinc_codes.
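        # e.g. the raw SQL "WHERE loinc_code != '43140-3'" alone would
        # also drop rows where loinc_code IS NULL, hence the explicit
        # second branch: ... != '43140-3' OR loinc_code IS NULL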
query = sq(ObservationData).\
filter(and_(ObservationData.hl7_msh_id.in_(msh_ids),
or_(ObservationData.loinc_code !=
'43140-3',
ObservationData.loinc_code == None)))
new_labs = self._new_labs(query)
if new_labs:
# Add the new labs to _all_ surrogates, as labs don't
# contain a patient class association
for sv in self._surrogates.values():
sv.labs = new_labs
def _query_messages_to_merge(self, visit_id):
"""Local helper to obtain all the new visit info.
- visit_id : The visit_id actively being deduplicated.
This should return the FullMessage data for any messages new
to this visit since last run.
returns the messages oldest to newest so the most recent info
'updates' what was previously known.
"""
dmq = self.data_mart.session.query
ids = dmq(MessageProcessed.hl7_msh_id).\
filter(and_(MessageProcessed.visit_id == visit_id,
MessageProcessed.processed_datetime ==
None)).all()
msg_ids = [id[0] for id in ids]
sq = self.data_warehouse.session.query
return sq(FullMessage).\
filter(FullMessage.hl7_msh_id.in_(msg_ids)).\
order_by(FullMessage.message_datetime)
def _calculateAge(self, visit):
"""Calculate the age for visit if not already defined
Uses the dob and admit_datetime for approximate value in
years, if the preferred method (see `SurrogatePatientAge`)
didn't succeed.
"""
if visit.age is not None:
return
if visit.dob is None:
logging.debug("%s: DOB not defined for visit '%s', "\
"can't calculate age", self.name,
visit.visit_id)
return
if visit.admit_datetime is None:
logging.warn("%s: admit_datetime not defined for visit "\
"'%s', can't calculate age", self.name,
visit.visit_id)
return
visit.age = getYearDiff(getDobDatetime(visit.dob),
visit.admit_datetime)
# Look out for case where newborn arrives before the average
# used (15th of month) in calculating the DOB from M/Y.
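        # e.g. (illustrative) a dob reported only as '2010-08' is taken
        # to be 2010-08-15; a newborn admitted 2010-08-03 can then yield
        # a year diff of -1, which really means age 0.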
if visit.age == -1:
visit.age = 0
def _get_surrogate(self, patient_class):
return self._surrogates.get(patient_class)
def _load_surrogates(self, visit_id):
"""load the existing visits for this visit_id
Query the database for all patient classes on this visit_id.
        The results are stored in self._surrogates keyed by
patient_class.
:param visit_id: the visit_id to query for
"""
self._surrogates = {}
sq = self.data_mart.session.query
query = sq(Visit).\
filter(Visit.visit_id == visit_id)
for v in query:
self._surrogates[v.patient_class] =\
SurrogateVisit(self, v)
def _set_surrogate(self, patient_class, visit):
"""set new visits not found via `_load_surrogates`
:param patient_class: The single character representing the
patient class for the visit, i.e. 'E' for ER
:param visit: The newly formed DBO instance
"""
assert not self._get_surrogate(patient_class)
self._surrogates[patient_class] = SurrogateVisit(self, visit)
def dedupVisit(self, visit_id):
""" Process a single visit_id - grab all associated data and
merge any new info into the visit_state table.
This may result in more than one row in the visit
table, as each unique (visit_id, patient_class) is treated
separately.
"""
if None and visit_id.startswith('id to debug'):
pdb_hook()
# Load any existing longitudinal visits for this id. (Likely
# to need them all if they exist for observation connections).
self._load_surrogates(visit_id)
# Observation messages are dealt with after we have all the
# respective patient_class visits built out - collect as we go.
observation_messages = []
clinical_messages = []
query = self._query_messages_to_merge(visit_id)
no_class_min_message_datetime = None
no_class_max_message_datetime = None
for message in query:
if message.message_type == 'ORM^O01^ORM_O01':
# Nothing of value at this time in order messages
continue
message_datetime = message.message_datetime
pc = message.visit.patient_class
if message.message_type == 'ORU^R01^ORU_R01':
                no_class_max_message_datetime = max(
                    message_datetime, no_class_max_message_datetime)
                # min() would stick at the initial None (None sorts
                # lowest in python 2), so guard the minimum explicitly
                if (no_class_min_message_datetime is None or
                        message_datetime < no_class_min_message_datetime):
                    no_class_min_message_datetime = message_datetime
if pc not in ('E', 'I', 'O'):
observation_messages.append(message)
else:
# This is a shortcut, using the lack of a patient
# class to determine if it's clinical data - saves
# the database hit. We still associate all
# clinical_messages with all patient classes like
# observation_messages
clinical_messages.append(message)
continue
# Don't create a new visit (on patient class) if the pc is
# 'U' (unknown). If we only have one patient class, we
# can safely assume it's the same visit - in any other
# case, log and toss this one.
if pc == 'U':
if len(self._surrogates) == 1:
pc = self._surrogates.keys()[0]
else:
logging.error("'U' patient class on message '%s' "
"for visit '%s' which has multiple "
"patient classes, don't know which "
"to associate data with",
message.message_control_id,
visit_id)
continue
sv = self._get_surrogate(pc)
longitudinal_visit = sv.visit if sv else None
if longitudinal_visit is None:
longitudinal_visit = self._handle_new_visit(message)
longitudinal_visit.first_message = min(
message_datetime, longitudinal_visit.first_message)
longitudinal_visit.last_message = max(
message_datetime, longitudinal_visit.last_message)
# Don't "update" with older messages
if message_datetime < longitudinal_visit.last_message:
logging.warn("skipping what looks like a stale, " +\
"duplicate message '%s' for visit '%s'",
message.message_control_id, visit_id)
continue
# Handle the columns with values in fact_visit not already
# processed (via new visit or message times)
for field in ('admit_datetime', 'discharge_datetime',
'gender', 'dob', 'disposition'):
value = getattr(message.visit, field, None)
b4 = getattr(longitudinal_visit, field, None)
if value and value != b4:
setattr(longitudinal_visit, field, value)
# Demographics
demographic_values = dict([(field, getattr(message.visit,
field, None))
for field in ('zip', 'country', 'state',
'county')])
if any(demographic_values.values()):
self._surrogates[pc].location =\
Location(**demographic_values)
a_s = getattr(message.visit, 'admission_source', None)
if a_s:
self._surrogates[pc].admission_source = a_s
al = getattr(message.visit, 'assigned_patient_location', None)
if al:
self._surrogates[pc].assigned_location = al
cc = getattr(message.visit, 'chief_complaint', None)
if cc and cc.strip():
self._surrogates[pc].admit_reason = cc
self._surrogates[pc].chief_complaint = cc
dis = getattr(message.visit, 'disposition', None)
if dis:
self._surrogates[pc].disposition = dis
race = getattr(message.visit, 'race', None)
if race:
self._surrogates[pc].race = race
sa = getattr(message.visit, 'service_code', None)
if sa:
self._surrogates[pc].service_area = sa
for dx in message.dxes:
                # the HL7 stream does include some blanks that are of
# no value - skip if we don't at least have an icd9
if dx.dx_code is None or len(dx.dx_code.strip()) == 0:
continue
                # dx_datetime is never populated in the HL7 stream;
# agreed to use the message datetime as an approximation
self._surrogates[pc].add_diagnosis(\
rank=dx.rank,
icd9=dx.dx_code,
description=dx.dx_description,
status=dx.dx_type,
dx_datetime=message_datetime)
for obx in message.obxes:
test_code = obx.observation_id
if test_code in clinical_codes_of_interest:
self._surrogates[pc].add_clinical_info(
test_code=test_code,
result=obx.observation_result,
units=obx.units)
# At this point, we must have an admit_datetime for every
# longitudinal visit created above. It turns out we
# occasionally get a visit without a valid time - Mike
# T. reports these are canceled visits. If found, log and
# move on.
for pc, sv in self._surrogates.items():
if sv.visit.admit_datetime is None:
logging.warn("Visit %s : %s lacks the "
"required admit_datetime field",
visit_id, pc)
# Have to mark it, or we'll keep retrying every time.
update = """UPDATE internal_message_processed SET
processed_datetime = '%s' WHERE processed_datetime IS
NULL AND visit_id = '%s' """ % (datetime.now(), visit_id)
self.data_mart.engine.execute(update)
return
else:
if sv.visit.pk is None:
self.data_mart.session.add(sv.visit)
self.data_mart.session.commit()
logging.debug("%s: new visit added '%s'", self.name,
sv.visit.visit_id)
# See if there is any new lab data to handle. Lab data
# doesn't contain patient class info and must be associated
# with all visits regardless of patient class
if observation_messages and self._surrogates:
if None and visit_id.startswith('id to debug'):
pdb_hook()
self._add_observations(observation_messages)
# The patient class in the observation messages doesn't appear
# to be reliable. Agreed to associate any clinical data with
# all matching visit_ids regardless of patient class
if clinical_messages and self._surrogates:
for message in clinical_messages:
for obx in message.obxes:
test_code = obx.observation_id
if test_code in clinical_codes_of_interest:
for sv in self._surrogates.values():
sv.add_clinical_info(
test_code=test_code,
result=obx.observation_result,
units=obx.units)
# Commit changes if needed.
for sv in self._surrogates.values():
# First, associate any dimensions created above
related_changes = sv.establish_associations()
            # adjust first/last datetimes if we picked up one w/o a pc;
            # skip the None defaults so they can't clobber real values
            if no_class_min_message_datetime is not None:
                sv.visit.first_message = min(sv.visit.first_message,
                                             no_class_min_message_datetime)
            if no_class_max_message_datetime is not None:
                sv.visit.last_message = max(sv.visit.last_message,
                                            no_class_max_message_datetime)
self._calculateAge(sv.visit) # in case it wasn't provided
self._commit_visit(sv.visit, related_changes)
# Mark those rows as processed
self.data_mart.engine.execute("""UPDATE internal_message_processed SET
processed_datetime = '%s' WHERE processed_datetime IS NULL AND visit_id
= '%s' """ % (datetime.now(), visit_id))
| pbugni/pheme.longitudinal | pheme/longitudinal/longitudinal_worker.py | Python | bsd-3-clause | 69014 | ["VisIt"] | 75d0b65c239ea942d74b070447a51e11a1c2621768d38fa309bdded5844cf1f5 |
# #
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Set of file tools.
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Toon Willems (Ghent University)
@author: Ward Poelmans (Ghent University)
@author: Fotis Georgatos (Uni.Lu, NTUA)
"""
import os
import re
import shutil
import stat
import time
import urllib2
import zlib
from vsc.utils import fancylogger
import easybuild.tools.environment as env
from easybuild.tools.build_log import print_msg # import build_log must stay, to activate use of EasyBuildLog
from easybuild.tools.config import build_option
from easybuild.tools import run
_log = fancylogger.getLogger('filetools', fname=False)
# easyblock class prefix
EASYBLOCK_CLASS_PREFIX = 'EB_'
# character map for encoding strings
STRING_ENCODING_CHARMAP = {
r' ': "_space_",
r'!': "_exclamation_",
r'"': "_quotation_",
r'#': "_hash_",
r'$': "_dollar_",
r'%': "_percent_",
r'&': "_ampersand_",
r'(': "_leftparen_",
r')': "_rightparen_",
r'*': "_asterisk_",
r'+': "_plus_",
r',': "_comma_",
r'-': "_minus_",
r'.': "_period_",
r'/': "_slash_",
r':': "_colon_",
r';': "_semicolon_",
r'<': "_lessthan_",
r'=': "_equals_",
r'>': "_greaterthan_",
r'?': "_question_",
r'@': "_atsign_",
r'[': "_leftbracket_",
r'\'': "_apostrophe_",
r'\\': "_backslash_",
r']': "_rightbracket_",
r'^': "_circumflex_",
r'_': "_underscore_",
r'`': "_backquote_",
r'{': "_leftcurly_",
r'|': "_verticalbar_",
r'}': "_rightcurly_",
r'~': "_tilde_",
}
try:
# preferred over md5/sha modules, but only available in Python 2.5 and more recent
import hashlib
md5_class = hashlib.md5
sha1_class = hashlib.sha1
except ImportError:
import md5, sha
md5_class = md5.md5
sha1_class = sha.sha
# default checksum for source and patch files
DEFAULT_CHECKSUM = 'md5'
# map of checksum types to checksum functions
CHECKSUM_FUNCTIONS = {
'md5': lambda p: calc_block_checksum(p, md5_class()),
'sha1': lambda p: calc_block_checksum(p, sha1_class()),
'adler32': lambda p: calc_block_checksum(p, ZlibChecksum(zlib.adler32)),
'crc32': lambda p: calc_block_checksum(p, ZlibChecksum(zlib.crc32)),
'size': lambda p: os.path.getsize(p),
}
class ZlibChecksum(object):
"""
wrapper class for adler32 and crc32 checksums to
match the interface of the hashlib module
"""
def __init__(self, algorithm):
self.algorithm = algorithm
self.checksum = algorithm(r'') # use the same starting point as the module
self.blocksize = 64 # The same as md5/sha1
def update(self, data):
"""Calculates a new checksum using the old one and the new data"""
self.checksum = self.algorithm(data, self.checksum)
    def hexdigest(self):
        """Return hex string of the checksum"""
        # use '%x' so the digest is actually rendered as hexadecimal
        return '0x%x' % (self.checksum & 0xffffffff)
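# Usage sketch: thanks to the hashlib-style interface, the helpers below
# can treat zlib and hashlib objects uniformly, e.g.:
#   chk = ZlibChecksum(zlib.adler32)
#   chk.update('some data')
#   digest = chk.hexdigest()  # a '0x...' hex string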
def read_file(path, log_error=True):
"""Read contents of file at given path, in a robust way."""
f = None
# note: we can't use try-except-finally, because Python 2.4 doesn't support it as a single block
try:
f = open(path, 'r')
txt = f.read()
f.close()
return txt
except IOError, err:
# make sure file handle is always closed
if f is not None:
f.close()
if log_error:
_log.error("Failed to read %s: %s" % (path, err))
else:
return None
def write_file(path, txt, append=False):
"""Write given contents to file at given path (overwrites current file contents!)."""
f = None
# note: we can't use try-except-finally, because Python 2.4 doesn't support it as a single block
try:
mkdir(os.path.dirname(path), parents=True)
if append:
f = open(path, 'a')
else:
f = open(path, 'w')
f.write(txt)
f.close()
except IOError, err:
# make sure file handle is always closed
if f is not None:
f.close()
_log.error("Failed to write to %s: %s" % (path, err))
def remove_file(path):
"""Remove file at specified path."""
try:
if os.path.exists(path):
os.remove(path)
except OSError, err:
_log.error("Failed to remove %s: %s", path, err)
def extract_file(fn, dest, cmd=None, extra_options=None, overwrite=False):
"""
Given filename fn, try to extract in directory dest
- returns the directory name in case of success
"""
if not os.path.isfile(fn):
_log.error("Can't extract file %s: no such file" % fn)
mkdir(dest, parents=True)
# use absolute pathnames from now on
absDest = os.path.abspath(dest)
# change working directory
try:
_log.debug("Unpacking %s in directory %s." % (fn, absDest))
os.chdir(absDest)
except OSError, err:
_log.error("Can't change to directory %s: %s" % (absDest, err))
if not cmd:
cmd = extract_cmd(fn, overwrite=overwrite)
else:
# complete command template with filename
cmd = cmd % fn
if not cmd:
_log.error("Can't extract file %s with unknown filetype" % fn)
if extra_options:
cmd = "%s %s" % (cmd, extra_options)
run.run_cmd(cmd, simple=True)
return find_base_dir()
def which(cmd):
"""Return (first) path in $PATH for specified command, or None if command is not found."""
paths = os.environ.get('PATH', '').split(os.pathsep)
for path in paths:
cmd_path = os.path.join(path, cmd)
        # only accept path if command is there, and both readable and executable
if os.access(cmd_path, os.R_OK | os.X_OK):
_log.info("Command %s found at %s" % (cmd, cmd_path))
return cmd_path
_log.warning("Could not find command '%s' (with permissions to read/execute it) in $PATH (%s)" % (cmd, paths))
return None
def det_common_path_prefix(paths):
"""Determine common path prefix for a given list of paths."""
if not isinstance(paths, list):
_log.error("det_common_path_prefix: argument must be of type list (got %s: %s)" % (type(paths), paths))
elif not paths:
return None
# initial guess for common prefix
prefix = paths[0]
found_common = False
while not found_common and prefix != os.path.dirname(prefix):
prefix = os.path.dirname(prefix)
found_common = all([p.startswith(prefix) for p in paths])
if found_common:
# prefix may be empty string for relative paths with a non-common prefix
return prefix.rstrip(os.path.sep) or None
else:
return None
def download_file(filename, url, path):
"""Download a file from the given URL, to the specified path."""
_log.debug("Trying to download %s from %s to %s", filename, url, path)
timeout = build_option('download_timeout')
if timeout is None:
# default to 10sec timeout if none was specified
        # default system timeout (used if nothing is specified) may be infinite (?)
timeout = 10
_log.debug("Using timeout of %s seconds for initiating download" % timeout)
# make sure directory exists
basedir = os.path.dirname(path)
mkdir(basedir, parents=True)
# try downloading, three times max.
downloaded = False
max_attempts = 3
attempt_cnt = 0
while not downloaded and attempt_cnt < max_attempts:
try:
# urllib2 does the right thing for http proxy setups, urllib does not!
url_fd = urllib2.urlopen(url, timeout=timeout)
_log.debug('response code for given url %s: %s' % (url, url_fd.getcode()))
write_file(path, url_fd.read())
_log.info("Downloaded file %s from url %s to %s" % (filename, url, path))
downloaded = True
url_fd.close()
except urllib2.HTTPError as err:
if 400 <= err.code <= 499:
_log.warning("URL %s was not found (HTTP response code %s), not trying again" % (url, err.code))
break
else:
_log.warning("HTTPError occured while trying to download %s to %s: %s" % (url, path, err))
attempt_cnt += 1
except IOError as err:
_log.warning("IOError occurred while trying to download %s to %s: %s" % (url, path, err))
attempt_cnt += 1
except Exception, err:
_log.error("Unexpected error occurred when trying to download %s to %s: %s" % (url, path, err))
if not downloaded and attempt_cnt < max_attempts:
_log.info("Attempt %d of downloading %s to %s failed, trying again..." % (attempt_cnt, url, path))
if downloaded:
_log.info("Successful download of file %s from url %s to path %s" % (filename, url, path))
return path
else:
_log.warning("Download of %s to %s failed, done trying" % (url, path))
return None
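# Usage sketch (hypothetical URL):
#   path = download_file('foo.tar.gz', 'http://example.com/foo.tar.gz',
#                        '/tmp/sources/foo.tar.gz')
#   if path is None:
#       pass  # all attempts failed, or the URL returned a 4xx code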
def find_easyconfigs(path, ignore_dirs=None):
"""
Find .eb easyconfig files in path
"""
if os.path.isfile(path):
return [path]
if ignore_dirs is None:
ignore_dirs = []
# walk through the start directory, retain all files that end in .eb
files = []
path = os.path.abspath(path)
for dirpath, dirnames, filenames in os.walk(path, topdown=True):
for f in filenames:
if not f.endswith('.eb') or f == 'TEMPLATE.eb':
continue
spec = os.path.join(dirpath, f)
_log.debug("Found easyconfig %s" % spec)
files.append(spec)
# ignore subdirs specified to be ignored by replacing items in dirnames list used by os.walk
dirnames[:] = [d for d in dirnames if not d in ignore_dirs]
return files
def search_file(paths, query, short=False, ignore_dirs=None, silent=False):
"""
Search for a particular file (only prints)
"""
if ignore_dirs is None:
ignore_dirs = ['.git', '.svn']
if not isinstance(ignore_dirs, list):
_log.error("search_file: ignore_dirs (%s) should be of type list, not %s" % (ignore_dirs, type(ignore_dirs)))
var_lines = []
hit_lines = []
var_index = 1
var = None
for path in paths:
hits = []
hit_in_path = False
print_msg("Searching (case-insensitive) for '%s' in %s " % (query, path), log=_log, silent=silent)
query = query.lower()
for (dirpath, dirnames, filenames) in os.walk(path, topdown=True):
for filename in filenames:
filename = os.path.join(dirpath, filename)
if filename.lower().find(query) != -1:
if not hit_in_path:
var = "CFGS%d" % var_index
var_index += 1
hit_in_path = True
hits.append(filename)
# do not consider (certain) hidden directories
# note: we still need to consider e.g., .local !
# replace list elements using [:], so os.walk doesn't process deleted directories
# see http://stackoverflow.com/questions/13454164/os-walk-without-hidden-folders
dirnames[:] = [d for d in dirnames if not d in ignore_dirs]
if hits:
common_prefix = det_common_path_prefix(hits)
if short and common_prefix is not None and len(common_prefix) > len(var) * 2:
var_lines.append("%s=%s" % (var, common_prefix))
hit_lines.extend([" * %s" % os.path.join('$%s' % var, fn[len(common_prefix) + 1:]) for fn in hits])
else:
hit_lines.extend([" * %s" % fn for fn in hits])
for line in var_lines + hit_lines:
print_msg(line, log=_log, silent=silent, prefix=False)
def compute_checksum(path, checksum_type=DEFAULT_CHECKSUM):
"""
Compute checksum of specified file.
@param path: Path of file to compute checksum for
@param checksum_type: Type of checksum ('adler32', 'crc32', 'md5' (default), 'sha1', 'size')
"""
if not checksum_type in CHECKSUM_FUNCTIONS:
_log.error("Unknown checksum type (%s), supported types are: %s" % (checksum_type, CHECKSUM_FUNCTIONS.keys()))
try:
checksum = CHECKSUM_FUNCTIONS[checksum_type](path)
except IOError, err:
_log.error("Failed to read %s: %s" % (path, err))
except MemoryError, err:
_log.warning("A memory error occured when computing the checksum for %s: %s" % (path, err))
checksum = 'dummy_checksum_due_to_memory_error'
return checksum
def calc_block_checksum(path, algorithm):
"""Calculate a checksum of a file by reading it into blocks"""
# We pick a blocksize of 16 MB: it's a multiple of the internal
# blocksize of md5/sha1 (64) and gave the best speed results
    try:
        # hashlib objects expose 'block_size', while ZlibChecksum (above)
        # uses 'blocksize'; accept either so both get a real block size
        blocksize = getattr(algorithm, 'blocksize', None) or algorithm.block_size
        blocksize *= 262144  # 2^18
    except AttributeError, err:
        blocksize = 16777216  # 2^24
_log.debug("Using blocksize %s for calculating the checksum" % blocksize)
try:
f = open(path, 'rb')
for block in iter(lambda: f.read(blocksize), r''):
algorithm.update(block)
f.close()
except IOError, err:
_log.error("Failed to read %s: %s" % (path, err))
return algorithm.hexdigest()
def verify_checksum(path, checksums):
"""
Verify checksum of specified file.
    @param path: path of file to verify checksum of
    @param checksums: checksum value(s), optionally with type (default is MD5), e.g. 'af314', ('sha1', '5ec1b')
"""
# if no checksum is provided, pretend checksum to be valid
if checksums is None:
return True
# make sure we have a list of checksums
if not isinstance(checksums, list):
checksums = [checksums]
for checksum in checksums:
if isinstance(checksum, basestring):
# default checksum type unless otherwise specified is MD5 (most common(?))
typ = DEFAULT_CHECKSUM
elif isinstance(checksum, tuple) and len(checksum) == 2:
typ, checksum = checksum
else:
_log.error("Invalid checksum spec '%s', should be a string (MD5) or 2-tuple (type, value)." % checksum)
actual_checksum = compute_checksum(path, typ)
_log.debug("Computed %s checksum for %s: %s (correct checksum: %s)" % (typ, path, actual_checksum, checksum))
if actual_checksum != checksum:
return False
# if we land here, all checksums have been verified to be correct
return True
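# Example sketch (the digests are the well-known md5/sha1 of empty input):
#   ok = verify_checksum('empty.txt',
#                        ['d41d8cd98f00b204e9800998ecf8427e',
#                         ('sha1', 'da39a3ee5e6b4b0d3255bfef95601890afd80709')])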
def find_base_dir():
"""
Try to locate a possible new base directory
- this is typically a single subdir, e.g. from untarring a tarball
- when extracting multiple tarballs in the same directory,
expect only the first one to give the correct path
"""
def get_local_dirs_purged():
# e.g. always purge the log directory
ignoreDirs = ["easybuild"]
lst = os.listdir(os.getcwd())
for ignDir in ignoreDirs:
if ignDir in lst:
lst.remove(ignDir)
return lst
lst = get_local_dirs_purged()
new_dir = os.getcwd()
while len(lst) == 1:
new_dir = os.path.join(os.getcwd(), lst[0])
if not os.path.isdir(new_dir):
break
try:
os.chdir(new_dir)
except OSError, err:
_log.exception("Changing to dir %s from current dir %s failed: %s" % (new_dir, os.getcwd(), err))
lst = get_local_dirs_purged()
# make sure it's a directory, and not a (single) file that was in a tarball for example
while not os.path.isdir(new_dir):
new_dir = os.path.dirname(new_dir)
_log.debug("Last dir list %s" % lst)
_log.debug("Possible new dir %s found" % new_dir)
return new_dir
def extract_cmd(filepath, overwrite=False):
"""
Determines the file type of file fn, returns extract cmd
- based on file suffix
- better to use Python magic?
"""
filename = os.path.basename(filepath)
exts = [x.lower() for x in filename.split('.')]
target = '.'.join(exts[:-1])
cmd_tmpl = None
# gzipped or gzipped tarball
if exts[-1] in ['gz']:
if exts[-2] in ['tar']:
# unzip .tar.gz in one go
cmd_tmpl = "tar xzf %(filepath)s"
else:
cmd_tmpl = "gunzip -c %(filepath)s > %(target)s"
elif exts[-1] in ['tgz', 'gtgz']:
cmd_tmpl = "tar xzf %(filepath)s"
# bzipped or bzipped tarball
elif exts[-1] in ['bz2']:
if exts[-2] in ['tar']:
cmd_tmpl = 'tar xjf %(filepath)s'
else:
cmd_tmpl = "bunzip2 %(filepath)s"
elif exts[-1] in ['tbz', 'tbz2', 'tb2']:
cmd_tmpl = "tar xjf %(filepath)s"
# xzipped or xzipped tarball
elif exts[-1] in ['xz']:
if exts[-2] in ['tar']:
cmd_tmpl = "unxz %(filepath)s --stdout | tar x"
else:
cmd_tmpl = "unxz %(filepath)s"
elif exts[-1] in ['txz']:
cmd_tmpl = "unxz %(filepath)s --stdout | tar x"
# tarball
elif exts[-1] in ['tar']:
cmd_tmpl = "tar xf %(filepath)s"
# zip file
elif exts[-1] in ['zip']:
if overwrite:
cmd_tmpl = "unzip -qq -o %(filepath)s"
else:
cmd_tmpl = "unzip -qq %(filepath)s"
if cmd_tmpl is None:
_log.error('Unknown file type for file %s (%s)' % (filepath, exts))
return cmd_tmpl % {'filepath': filepath, 'target': target}
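# For example, extract_cmd('/tmp/foo.tar.gz') returns 'tar xzf /tmp/foo.tar.gz',
# while extract_cmd('/tmp/foo.gz') returns 'gunzip -c /tmp/foo.gz > foo'
# (the target is relative because extract_file() chdirs into dest first).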
def det_patched_files(path=None, txt=None, omit_ab_prefix=False):
"""Determine list of patched files from a patch."""
# expected format: "+++ path/to/patched/file"
# also take into account the 'a/' or 'b/' prefix that may be used
patched_regex = re.compile(r"^\s*\+{3}\s+(?P<ab_prefix>[ab]/)?(?P<file>\S+)", re.M)
if path is not None:
try:
f = open(path, 'r')
txt = f.read()
f.close()
except IOError, err:
_log.error("Failed to read patch %s: %s" % (path, err))
elif txt is None:
_log.error("Either a file path or a string representing a patch should be supplied to det_patched_files")
patched_files = []
for match in patched_regex.finditer(txt):
patched_file = match.group('file')
if not omit_ab_prefix and match.group('ab_prefix') is not None:
patched_file = match.group('ab_prefix') + patched_file
if patched_file in ['/dev/null']:
_log.debug("Ignoring patched file %s" % patched_file)
else:
patched_files.append(patched_file)
return patched_files
def guess_patch_level(patched_files, parent_dir):
"""Guess patch level based on list of patched files and specified directory."""
patch_level = None
for patched_file in patched_files:
        # locate file by stripping off directories
tf2 = patched_file.split(os.path.sep)
n_paths = len(tf2)
path_found = False
level = None
for level in range(n_paths):
if os.path.isfile(os.path.join(parent_dir, *tf2[level:])):
path_found = True
break
if path_found:
patch_level = level
break
else:
_log.debug('No match found for %s, trying next patched file...' % patched_file)
return patch_level
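# For example, given a patched file 'a/src/util.c' and a parent_dir that
# contains 'src/util.c': level 0 ('a/src/util.c') finds nothing, level 1
# ('src/util.c') matches, so the guessed level is 1 (i.e. patch -p1).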
def apply_patch(patch_file, dest, fn=None, copy=False, level=None):
"""
Apply a patch to source code in directory dest
- assume unified diff created with "diff -ru old new"
"""
if not os.path.isfile(patch_file):
_log.error("Can't find patch %s: no such file" % patch_file)
return
if fn and not os.path.isfile(fn):
_log.error("Can't patch file %s: no such file" % fn)
return
if not os.path.isdir(dest):
_log.error("Can't patch directory %s: no such directory" % dest)
return
# copy missing files
if copy:
try:
shutil.copy2(patch_file, dest)
_log.debug("Copied patch %s to dir %s" % (patch_file, dest))
return 'ok'
except IOError, err:
_log.error("Failed to copy %s to dir %s: %s" % (patch_file, dest, err))
return
# use absolute paths
apatch = os.path.abspath(patch_file)
adest = os.path.abspath(dest)
if not level:
# guess value for -p (patch level)
# - based on +++ lines
# - first +++ line that matches an existing file determines guessed level
# - we will try to match that level from current directory
patched_files = det_patched_files(path=apatch)
if not patched_files:
_log.error("Can't guess patchlevel from patch %s: no testfile line found in patch" % apatch)
return
patch_level = guess_patch_level(patched_files, adest)
if patch_level is None: # patch_level can also be 0 (zero), so don't use "not patch_level"
# no match
_log.error("Can't determine patch level for patch %s from directory %s" % (patch_file, adest))
else:
_log.debug("Guessed patch level %d for patch %s" % (patch_level, patch_file))
else:
patch_level = level
_log.debug("Using specified patch level %d for patch %s" % (patch_level, patch_file))
try:
os.chdir(adest)
_log.debug("Changing to directory %s" % adest)
except OSError, err:
_log.error("Can't change to directory %s: %s" % (adest, err))
return
patch_cmd = "patch -b -p%d -i %s" % (patch_level, apatch)
result = run.run_cmd(patch_cmd, simple=True)
if not result:
_log.error("Patching with patch %s failed" % patch_file)
return
return result
def modify_env(old, new):
"""NO LONGER SUPPORTED: use modify_env from easybuild.tools.environment instead"""
_log.nosupport("moved modify_env to easybuild.tools.environment", "2.0")
def convert_name(name, upper=False):
"""
Converts name so it can be used as variable name
"""
# no regexps
charmap = {
'+': 'plus',
'-': 'min'
}
for ch, new in charmap.items():
name = name.replace(ch, new)
if upper:
return name.upper()
else:
return name
def adjust_permissions(name, permissionBits, add=True, onlyfiles=False, onlydirs=False, recursive=True,
group_id=None, relative=True, ignore_errors=False):
"""
Add or remove (if add is False) permissionBits from all files (if onlydirs is False)
and directories (if onlyfiles is False) in path
"""
name = os.path.abspath(name)
if recursive:
_log.info("Adjusting permissions recursively for %s" % name)
allpaths = [name]
for root, dirs, files in os.walk(name):
paths = []
if not onlydirs:
paths += files
if not onlyfiles:
paths += dirs
for path in paths:
allpaths.append(os.path.join(root, path))
else:
_log.info("Adjusting permissions for %s" % name)
allpaths = [name]
failed_paths = []
fail_cnt = 0
for path in allpaths:
try:
if relative:
# relative permissions (add or remove)
perms = os.stat(path)[stat.ST_MODE]
if add:
os.chmod(path, perms | permissionBits)
else:
os.chmod(path, perms & ~permissionBits)
else:
# hard permissions bits (not relative)
os.chmod(path, permissionBits)
if group_id:
                # only change the group id if the current gid is different from what we want
cur_gid = os.stat(path).st_gid
if not cur_gid == group_id:
_log.debug("Changing group id of %s to %s" % (path, group_id))
os.chown(path, -1, group_id)
else:
_log.debug("Group id of %s is already OK (%s)" % (path, group_id))
except OSError, err:
if ignore_errors:
# ignore errors while adjusting permissions (for example caused by bad links)
_log.info("Failed to chmod/chown %s (but ignoring it): %s" % (path, err))
fail_cnt += 1
else:
failed_paths.append(path)
if failed_paths:
_log.error("Failed to chmod/chown several paths: %s (last error: %s)" % (failed_paths, err))
    # we ignore some errors, but if there are too many, something is definitely wrong
fail_ratio = fail_cnt / float(len(allpaths))
max_fail_ratio = 0.5
if fail_ratio > max_fail_ratio:
_log.error("%.2f%% of permissions/owner operations failed (more than %.2f%%), something must be wrong..." %
(100 * fail_ratio, 100 * max_fail_ratio))
elif fail_cnt > 0:
_log.debug("%.2f%% of permissions/owner operations failed, ignoring that..." % (100 * fail_ratio))
def patch_perl_script_autoflush(path):
# patch Perl script to enable autoflush,
# so that e.g. run_cmd_qa receives all output to answer questions
txt = read_file(path)
origpath = "%s.eb.orig" % path
write_file(origpath, txt)
_log.debug("Patching Perl script %s for autoflush, original script copied to %s" % (path, origpath))
# force autoflush for Perl print buffer
lines = txt.split('\n')
newtxt = '\n'.join([
lines[0], # shebang line
"\nuse IO::Handle qw();",
"STDOUT->autoflush(1);\n", # extra newline to separate from actual script
] + lines[1:])
write_file(path, newtxt)
def mkdir(path, parents=False, set_gid=None, sticky=None):
"""
Create a directory
Directory is the path to create
@param parents: create parent directories if needed (mkdir -p)
@param set_gid: set group ID bit, to make subdirectories and files inherit group
@param sticky: set the sticky bit on this directory (a.k.a. the restricted deletion flag),
    to prevent users from removing/renaming files in this directory
"""
if set_gid is None:
set_gid = build_option('set_gid_bit')
if sticky is None:
sticky = build_option('sticky_bit')
if not os.path.isabs(path):
path = os.path.abspath(path)
# exit early if path already exists
if not os.path.exists(path):
tup = (path, parents, set_gid, sticky)
_log.info("Creating directory %s (parents: %s, set_gid: %s, sticky: %s)" % tup)
# set_gid and sticky bits are only set on new directories, so we need to determine the existing parent path
existing_parent_path = os.path.dirname(path)
try:
if parents:
# climb up until we hit an existing path or the empty string (for relative paths)
while existing_parent_path and not os.path.exists(existing_parent_path):
existing_parent_path = os.path.dirname(existing_parent_path)
os.makedirs(path)
else:
os.mkdir(path)
except OSError, err:
_log.error("Failed to create directory %s: %s" % (path, err))
# set group ID and sticky bits, if desired
bits = 0
if set_gid:
bits |= stat.S_ISGID
if sticky:
bits |= stat.S_ISVTX
if bits:
try:
new_subdir = path[len(existing_parent_path):].lstrip(os.path.sep)
new_path = os.path.join(existing_parent_path, new_subdir.split(os.path.sep)[0])
adjust_permissions(new_path, bits, add=True, relative=True, recursive=True, onlydirs=True)
except OSError, err:
_log.error("Failed to set groud ID/sticky bit: %s" % err)
else:
_log.debug("Not creating existing path %s" % path)
def path_matches(path, paths):
"""Check whether given path matches any of the provided paths."""
if not os.path.exists(path):
return False
for somepath in paths:
if os.path.exists(somepath) and os.path.samefile(path, somepath):
return True
return False
def rmtree2(path, n=3):
"""Wrapper around shutil.rmtree to make it more robust when used on NFS mounted file systems."""
ok = False
for i in range(0, n):
try:
shutil.rmtree(path)
ok = True
break
except OSError, err:
_log.debug("Failed to remove path %s with shutil.rmtree at attempt %d: %s" % (path, n, err))
time.sleep(2)
if not ok:
_log.error("Failed to remove path %s with shutil.rmtree, even after %d attempts." % (path, n))
else:
_log.info("Path %s successfully removed." % path)
def cleanup(logfile, tempdir, testing):
"""Cleanup the specified log file and the tmp directory"""
if not testing and logfile is not None:
os.remove(logfile)
print_msg('temporary log file %s has been removed.' % (logfile), log=None, silent=testing)
if not testing and tempdir is not None:
shutil.rmtree(tempdir, ignore_errors=True)
print_msg('temporary directory %s has been removed.' % (tempdir), log=None, silent=testing)
def copytree(src, dst, symlinks=False, ignore=None):
"""
    Copied from Lib/shutil.py in python 2.7, since we need this to work
    for python 2.4 as well, and this code can be improved...
Recursively copy a directory tree using copy2().
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
XXX Consider this example code rather than the ultimate tool.
"""
class Error(EnvironmentError):
pass
try:
WindowsError # @UndefinedVariable
except NameError:
WindowsError = None
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
_log.debug("copytree: skipping copy of %s" % ignored_names)
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore)
else:
# Will raise a SpecialFileError for unsupported file types
shutil.copy2(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error, err:
errors.extend(err.args[0])
except EnvironmentError, why:
errors.append((srcname, dstname, str(why)))
try:
shutil.copystat(src, dst)
except OSError, why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
errors.extend((src, dst, str(why)))
if errors:
raise Error, errors
def encode_string(name):
"""
This encoding function handles funky software names ad infinitum, like:
example: '0_foo+0x0x#-$__'
becomes: '0_underscore_foo_plus_0x0x_hash__minus__dollar__underscore__underscore_'
The intention is to have a robust escaping mechanism for names like c++, C# et al
It has been inspired by the concepts seen at, but in lowercase style:
* http://fossies.org/dox/netcdf-4.2.1.1/escapes_8c_source.html
* http://celldesigner.org/help/CDH_Species_01.html
* http://research.cs.berkeley.edu/project/sbp/darcsrepo-no-longer-updated/src/edu/berkeley/sbp/misc/ReflectiveWalker.java
and can be extended freely as per ISO/IEC 10646:2012 / Unicode 6.1 names:
* http://www.unicode.org/versions/Unicode6.1.0/
For readability of >2 words, it is suggested to use _CamelCase_ style.
So, yes, '_GreekSmallLetterEtaWithPsiliAndOxia_' *could* indeed be a fully
valid software name; software "electron" in the original spelling anyone? ;-)
"""
# do the character remapping, return same char by default
result = ''.join(map(lambda x: STRING_ENCODING_CHARMAP.get(x, x), name))
return result
def decode_string(name):
"""Decoding function to revert result of encode_string."""
result = name
for (char, escaped_char) in STRING_ENCODING_CHARMAP.items():
result = re.sub(escaped_char, char, result)
return result
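# Round-trip example: encode_string('c++') gives 'c_plus__plus_', and
# decode_string('c_plus__plus_') should give back 'c++'.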
def encode_class_name(name):
"""return encoded version of class name"""
return EASYBLOCK_CLASS_PREFIX + encode_string(name)
def decode_class_name(name):
"""Return decoded version of class name."""
if not name.startswith(EASYBLOCK_CLASS_PREFIX):
# name is not encoded, apparently
return name
else:
name = name[len(EASYBLOCK_CLASS_PREFIX):]
return decode_string(name)
def run_cmd(cmd, log_ok=True, log_all=False, simple=False, inp=None, regexp=True, log_output=False, path=None):
"""NO LONGER SUPPORTED: use run_cmd from easybuild.tools.run instead"""
_log.nosupport("run_cmd was moved from easybuild.tools.filetools to easybuild.tools.run", '2.0')
def run_cmd_qa(cmd, qa, no_qa=None, log_ok=True, log_all=False, simple=False, regexp=True, std_qa=None, path=None):
"""NO LONGER SUPPORTED: use run_cmd_qa from easybuild.tools.run instead"""
_log.nosupport("run_cmd_qa was moved from easybuild.tools.filetools to easybuild.tools.run", '2.0')
def parse_log_for_error(txt, regExp=None, stdout=True, msg=None):
"""NO LONGER SUPPORTED: use parse_log_for_error from easybuild.tools.run instead"""
_log.nosupport("parse_log_for_error was moved from easybuild.tools.filetools to easybuild.tools.run", '2.0')
def det_size(path):
"""
Determine total size of given filepath (in bytes).
"""
installsize = 0
try:
# walk install dir to determine total size
for (dirpath, _, filenames) in os.walk(path):
for filename in filenames:
fullpath = os.path.join(dirpath, filename)
if os.path.exists(fullpath):
installsize += os.path.getsize(fullpath)
except OSError, err:
_log.warn("Could not determine install size: %s" % err)
return installsize
| pneerincx/easybuild-framework | easybuild/tools/filetools.py | Python | gpl-2.0 | 36453 | ["NetCDF"] | 68ea2e4c399e62c25b25a8fc3ad3ab50692cdcb498f01c79b5ae015751742cbe |
# -*- coding: utf-8 -*-
#
# CMS documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 6 14:12:39 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
print "PATH: {}".format(os.path.dirname(os.path.abspath('../cms/cms/')))
sys.path.insert(0, os.path.dirname(os.path.abspath('../cms/cms/')))
# -- Mock out the heavyweight pip packages, esp those that require C ----
import mock
MOCK_MODULES = ['numpy', 'scipy', 'scipy.stats', 'scipy.stats.kde', 'matplotlib', 'matplotlib.pyplot', 'pysam',
'Bio', 'Bio.AlignIO', 'Bio.SeqIO', 'Bio.Data.IUPACData']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
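# With these stubs in sys.modules, statements like "import numpy" in the
# documented code succeed without the real packages installed, so autodoc
# can import every module on a minimal builder such as Read the Docs.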
# -- Obtain GIT version --
import subprocess
def _git_version():
cmd = ['git', 'describe', '--tags', '--always'] # omit "--dirty" from doc build
out = subprocess.check_output(cmd)
if type(out) != str:
out = out.decode('utf-8')
return out.strip()
__version__ = _git_version()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
'sphinxarg.ext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CMS'
copyright = u'2015, Broad Institute'
author = u'Broad Institute'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
release = __version__
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
html_theme = 'default'
else:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d. {}'.format(release)
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'CMSdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CMS.tex', u'CMS Documentation',
u'Broad Institute', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cms', u'CMS Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CMS', u'CMS Documentation',
author, 'CMS', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| broadinstitute/cms | docs/conf.py | Python | bsd-2-clause | 10185 | ["pysam"] | 1775cc28c14e9bafa4e98a6f2c7327032938539658c15fd71bbf12f3ba36de78 |
|
#!/usr/bin/env python
__author__ = 'waroquiers'
import unittest
import os
import json
from pymatgen.util.testing import PymatgenTest
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import AngleNbSetWeight
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import NormalizedAngleDistanceNbSetWeight
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import CNBiasNbSetWeight
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import SelfCSMNbSetWeight
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import DeltaCSMNbSetWeight
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import DistanceAngleAreaNbSetWeight
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import DistanceNbSetWeight
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import DeltaDistanceNbSetWeight
from pymatgen.analysis.chemenv.coordination_environments.structure_environments import StructureEnvironments
se_files_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "..",
'test_files', "chemenv", "structure_environments_files")
class FakeNbSet:
def __init__(self, cn=None):
self.cn = cn
    def __len__(self):
        return self.cn
class DummyStructureEnvironments:
pass
class DummyVoronoiContainer:
pass
class StrategyWeightsTest(PymatgenTest):
def test_angle_weight(self):
fake_nb_set = FakeNbSet()
dummy_se = DummyStructureEnvironments()
# Angles for a given fake nb_set with 5 neighbors
fake_nb_set.angles = [1.8595833644514066, 2.622518848090717, 3.08570351705799,
2.2695472184920042, 2.2695338778592387]
angle_weight = AngleNbSetWeight(aa=1.0)
aw = angle_weight.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(aw, 0.9634354419021528, delta=1e-8)
angle_weight = AngleNbSetWeight(aa=2.0)
aw = angle_weight.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(aw, 0.92820785071319645, delta=1e-8)
angle_weight = AngleNbSetWeight(aa=0.5)
aw = angle_weight.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(aw, 0.98154747307613843, delta=1e-8)
self.assertNotEqual(AngleNbSetWeight(1.0), AngleNbSetWeight(2.0))
# nb_set with no neighbor
fake_nb_set.angles = []
angle_weight = AngleNbSetWeight(aa=1.0)
aw = angle_weight.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(aw, 0.0, delta=1e-8)
angle_weight = AngleNbSetWeight(aa=2.0)
aw = angle_weight.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(aw, 0.0, delta=1e-8)
# nb_set with one neighbor
fake_nb_set.angles = [3.08570351705799]
angle_weight = AngleNbSetWeight(aa=1.0)
aw = angle_weight.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(aw, 0.24555248382791284, delta=1e-8)
angle_weight = AngleNbSetWeight(aa=2.0)
aw = angle_weight.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(aw, 0.060296022314057396, delta=1e-8)
angle_weight = AngleNbSetWeight(aa=0.5)
aw = angle_weight.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(aw, 0.49553252549950022, delta=1e-8)
# nb_set with 6 neighbors (sum of the angles is 4*pi, i.e. the full sphere)
fake_nb_set.angles = [1.8595833644514066, 0.459483788407816, 2.622518848090717,
3.08570351705799, 2.2695472184920042, 2.2695338778592387]
angle_weight = AngleNbSetWeight(aa=1.0)
aw = angle_weight.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(aw, 1.0, delta=1e-8)
angle_weight = AngleNbSetWeight(aa=2.0)
aw = angle_weight.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(aw, 1.0, delta=1e-8)
angle_weight = AngleNbSetWeight(aa=0.5)
aw = angle_weight.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(aw, 1.0, delta=1e-8)
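        # Worked check of the expected values: AngleNbSetWeight with exponent aa
        # is consistent with the fraction of the full sphere covered by the
        # neighbors' solid angles, raised to aa, i.e.
        #   weight = (sum(angles) / (4 * pi)) ** aa
        # For the single neighbor above: 3.08570351705799 / (4*pi) ~ 0.2455525
        # (aa=1), and 0.2455525**2 ~ 0.0602960 (aa=2), matching the assertions.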
def test_normalized_angle_distance_weight(self):
fake_nb_set = FakeNbSet()
dummy_se = DummyStructureEnvironments()
nadw1 = NormalizedAngleDistanceNbSetWeight(average_type='geometric', aa=1, bb=1)
nadw2 = NormalizedAngleDistanceNbSetWeight(average_type='arithmetic', aa=1, bb=1)
nadw3 = NormalizedAngleDistanceNbSetWeight(average_type='geometric', aa=0, bb=1)
nadw4 = NormalizedAngleDistanceNbSetWeight(average_type='arithmetic', aa=1, bb=0)
nadw5 = NormalizedAngleDistanceNbSetWeight(average_type='arithmetic', aa=0.1, bb=0.1)
nadw6 = NormalizedAngleDistanceNbSetWeight(average_type='arithmetic', aa=0, bb=0.1)
nadw7 = NormalizedAngleDistanceNbSetWeight(average_type='arithmetic', aa=0.1, bb=0)
nadw8 = NormalizedAngleDistanceNbSetWeight(average_type='arithmetic', aa=2, bb=0)
nadw9 = NormalizedAngleDistanceNbSetWeight(average_type='arithmetic', aa=0, bb=2)
nadw10 = NormalizedAngleDistanceNbSetWeight(average_type='arithmetic', aa=2, bb=2)
nadw11 = NormalizedAngleDistanceNbSetWeight(average_type='geometric', aa=1, bb=2)
nadw12 = NormalizedAngleDistanceNbSetWeight(average_type='geometric', aa=2, bb=1)
self.assertNotEqual(nadw11, nadw12)
with self.assertRaisesRegex(ValueError, 'Both exponents are 0.'):
NormalizedAngleDistanceNbSetWeight(average_type='arithmetic', aa=0, bb=0)
with self.assertRaisesRegex(ValueError, 'Average type is "arithmetix" '
'while it should be "geometric" or "arithmetic"'):
NormalizedAngleDistanceNbSetWeight(average_type='arithmetix', aa=1, bb=1)
fake_nb_set.normalized_distances = [1.2632574171572457, 1.1231971151388764, 1.0,
1.1887986376446249, 1.188805134890625]
fake_nb_set.normalized_angles = [0.6026448601336767, 0.8498933334305273, 1.0,
0.7355039801931018, 0.7354996568248028]
w1 = nadw1.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(w1, 0.67310887189488189, delta=1e-8)
w2 = nadw2.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(w2, 0.69422258996523023, delta=1e-8)
w3 = nadw3.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(w3, 0.8700949310182079, delta=1e-8)
w4 = nadw4.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(w4, 0.7847083661164217, delta=1e-8)
w5 = nadw5.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(w5, 0.96148050989126843, delta=1e-8)
w6 = nadw6.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(w6, 0.98621181678741754, delta=1e-8)
w7 = nadw7.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(w7, 0.97479580875402994, delta=1e-8)
w8 = nadw8.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(w8, 0.63348507114489783, delta=1e-8)
w9 = nadw9.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(w9, 0.7668954450583646, delta=1e-8)
w10 = nadw10.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(w10, 0.51313920014833292, delta=1e-8)
w11 = nadw11.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(w11, 0.585668617459, delta=1e-8)
w12 = nadw12.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(w12, 0.520719679281, delta=1e-8)
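        # The reference values are consistent with a per-neighbor quantity
        #   normalized_angle**aa / normalized_distance**bb
        # combined with the chosen (arithmetic or geometric) average. E.g. for
        # nadw4 (arithmetic, aa=1, bb=0) the weight reduces to the plain mean
        # of the normalized angles, ~0.7847084, as asserted above.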
def test_CN_bias_weight(self):
fake_nb_set = FakeNbSet()
dummy_se = DummyStructureEnvironments()
bias_weight1 = CNBiasNbSetWeight.linearly_equidistant(weight_cn1=1.0, weight_cn13=13.0)
bias_weight2 = CNBiasNbSetWeight.geometrically_equidistant(weight_cn1=1.0, weight_cn13=1.1**12)
bias_weight3 = CNBiasNbSetWeight.explicit(cn_weights={1: 1.0, 2: 3.0, 3: 3.2, 4: 4.0,
5: 4.1, 6: 4.2, 7: 4.3, 8: 4.4,
9: 4.5, 10: 4.6, 11: 4.6, 12: 4.7,
13: 4.8})
with self.assertRaisesRegex(ValueError, 'Weights should be provided for CN 1 to 13'):
CNBiasNbSetWeight.explicit(cn_weights={1: 1.0, 13: 2.0})
fake_nb_set.cn = 1
w1 = bias_weight1.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(w1, 1.0, delta=1e-8)
w2 = bias_weight2.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(w2, 1.0, delta=1e-8)
w3 = bias_weight3.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(w3, 1.0, delta=1e-8)
fake_nb_set.cn = 7
w1 = bias_weight1.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(w1, 7.0, delta=1e-8)
w2 = bias_weight2.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(w2, 1.1**6, delta=1e-8)
w3 = bias_weight3.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(w3, 4.3, delta=1e-8)
fake_nb_set.cn = 13
w1 = bias_weight1.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(w1, 13.0, delta=1e-8)
w2 = bias_weight2.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(w2, 1.1**12, delta=1e-8)
w3 = bias_weight3.weight(nb_set=fake_nb_set, structure_environments=dummy_se)
self.assertAlmostEqual(w3, 4.8, delta=1e-8)
bias_weight4 = CNBiasNbSetWeight.from_description({'type': 'linearly_equidistant',
'weight_cn1': 2.0,
'weight_cn13': 26.0})
for cn in range(1, 14):
self.assertAlmostEqual(bias_weight4.cn_weights[cn], 2.0*cn)
bias_weight5 = CNBiasNbSetWeight.from_description({'type': 'geometrically_equidistant',
'weight_cn1': 1.0,
'weight_cn13': 13.0})
self.assertAlmostEqual(bias_weight5.cn_weights[1], 1.0)
self.assertAlmostEqual(bias_weight5.cn_weights[3], 1.5334062370163877)
self.assertAlmostEqual(bias_weight5.cn_weights[9], 5.5287748136788739)
self.assertAlmostEqual(bias_weight5.cn_weights[12], 10.498197520079623)
cn_weights = {cn: 0.0 for cn in range(1, 14)}
cn_weights[6] = 2.0
cn_weights[4] = 1.0
bias_weight6 = CNBiasNbSetWeight.from_description({'type': 'explicit',
'cn_weights': cn_weights})
self.assertAlmostEqual(bias_weight6.cn_weights[1], 0.0)
self.assertAlmostEqual(bias_weight6.cn_weights[4], 1.0)
self.assertAlmostEqual(bias_weight6.cn_weights[6], 2.0)
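        # 'geometrically_equidistant' weights form a geometric progression
        # between weight_cn1 and weight_cn13:
        #   w(cn) = w1 * (w13 / w1) ** ((cn - 1) / 12)
        # e.g. w(3) = 13**(2/12) ~ 1.5334062, matching the assertion above.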
def test_self_csms_weight(self):
# Get the StructureEnvironments for K2NaNb2Fe7Si8H4O31 (mp-743972)
        with open(os.path.join(se_files_dir, 'se_mp-743972.json'), 'r') as f:
            dd = json.load(f)
se = StructureEnvironments.from_dict(dd)
# Get neighbors sets for which we get the weights
cn_maps = [(12, 3), (12, 2), (13, 2), (12, 0), (12, 1)]
nbsets = {cn_map: se.neighbors_sets[0][cn_map[0]][cn_map[1]] for cn_map in cn_maps}
effective_csm_estimator = {'function': 'power2_inverse_decreasing',
'options': {'max_csm': 8.0}}
weight_estimator = {'function': 'power2_decreasing_exp',
'options': {'max_csm': 8.0,
'alpha': 1.0}}
weight_estimator2 = {'function': 'power2_decreasing_exp',
'options': {'max_csm': 8.1,
'alpha': 1.0}}
symmetry_measure_type = 'csm_wcs_ctwcc'
self_weight = SelfCSMNbSetWeight(effective_csm_estimator=effective_csm_estimator,
weight_estimator=weight_estimator,
symmetry_measure_type=symmetry_measure_type)
self_weight2 = SelfCSMNbSetWeight(effective_csm_estimator=effective_csm_estimator,
weight_estimator=weight_estimator2,
symmetry_measure_type=symmetry_measure_type)
self.assertNotEqual(self_weight, self_weight2)
additional_info = {}
cn_map = (12, 3)
self_w = self_weight.weight(nb_set=nbsets[cn_map], structure_environments=se,
cn_map=cn_map, additional_info=additional_info)
self.assertAlmostEqual(self_w, 0.11671945916431022, delta=1e-8)
cn_map = (12, 2)
self_w = self_weight.weight(nb_set=nbsets[cn_map], structure_environments=se,
cn_map=cn_map, additional_info=additional_info)
self.assertAlmostEqual(self_w, 0.0, delta=1e-8)
cn_map = (12, 0)
self_w = self_weight.weight(nb_set=nbsets[cn_map], structure_environments=se,
cn_map=cn_map, additional_info=additional_info)
self.assertAlmostEqual(self_w, 0.0, delta=1e-8)
cn_map = (12, 1)
self_w = self_weight.weight(nb_set=nbsets[cn_map], structure_environments=se,
cn_map=cn_map, additional_info=additional_info)
self.assertAlmostEqual(self_w, 0.0, delta=1e-8)
cn_map = (13, 2)
self_w = self_weight.weight(nb_set=nbsets[cn_map], structure_environments=se,
cn_map=cn_map, additional_info=additional_info)
self.assertAlmostEqual(self_w, 0.14204073172729198, delta=1e-8)
# Get the StructureEnvironments for SiO2 (mp-7000)
        with open(os.path.join(se_files_dir, 'se_mp-7000.json'), 'r') as f:
            dd = json.load(f)
se = StructureEnvironments.from_dict(dd)
# Get neighbors sets for which we get the weights
cn_maps = [(2, 0), (4, 0)]
nbsets = {cn_map: se.neighbors_sets[6][cn_map[0]][cn_map[1]] for cn_map in cn_maps}
effective_csm_estimator = {'function': 'power2_inverse_decreasing',
'options': {'max_csm': 8.0}}
weight_estimator = {'function': 'power2_decreasing_exp',
'options': {'max_csm': 8.0,
'alpha': 1.0}}
symmetry_measure_type = 'csm_wcs_ctwcc'
self_weight = SelfCSMNbSetWeight(effective_csm_estimator=effective_csm_estimator,
weight_estimator=weight_estimator,
symmetry_measure_type=symmetry_measure_type)
additional_info = {}
cn_map = (2, 0)
self_w = self_weight.weight(nb_set=nbsets[cn_map], structure_environments=se,
cn_map=cn_map, additional_info=additional_info)
self.assertAlmostEqual(self_w, 0.8143992162836029, delta=1e-8)
cn_map = (4, 0)
self_w = self_weight.weight(nb_set=nbsets[cn_map], structure_environments=se,
cn_map=cn_map, additional_info=additional_info)
self.assertAlmostEqual(self_w, 0.99629742352359496, delta=1e-8)
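        # Qualitatively, SelfCSMNbSetWeight maps the effective continuous
        # symmetry measure (CSM) of a neighbors set onto [0, 1]: near-perfect
        # environments such as the SiO2 tetrahedron above get a weight close to
        # 1, while sets whose CSM approaches max_csm are weighted down to ~0.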
def test_delta_csms_weight(self):
# Get the StructureEnvironments for K2NaNb2Fe7Si8H4O31 (mp-743972)
        with open(os.path.join(se_files_dir, 'se_mp-743972.json'), 'r') as f:
            dd = json.load(f)
se = StructureEnvironments.from_dict(dd)
# Get neighbors sets for which we get the weights
cn_maps = [(12, 3), (12, 2), (13, 2), (12, 0), (12, 1), (13, 0), (13, 1)]
nbsets = {cn_map: se.neighbors_sets[0][cn_map[0]][cn_map[1]] for cn_map in cn_maps}
effective_csm_estimator = {'function': 'power2_inverse_decreasing',
'options': {'max_csm': 8.0}}
weight_estimator = {'function': 'smootherstep',
'options': {'delta_csm_min': 0.5,
'delta_csm_max': 3.0}}
symmetry_measure_type = 'csm_wcs_ctwcc'
delta_weight = DeltaCSMNbSetWeight(effective_csm_estimator=effective_csm_estimator,
weight_estimator=weight_estimator,
symmetry_measure_type=symmetry_measure_type)
additional_info = {}
cn_map = (12, 3)
delta_w = delta_weight.weight(nb_set=nbsets[cn_map], structure_environments=se,
cn_map=cn_map, additional_info=additional_info)
self.assertAlmostEqual(delta_w, 0.0, delta=1e-8)
cn_map = (12, 2)
delta_w = delta_weight.weight(nb_set=nbsets[cn_map], structure_environments=se,
cn_map=cn_map, additional_info=additional_info)
self.assertAlmostEqual(delta_w, 0.0, delta=1e-8)
cn_map = (12, 0)
delta_w = delta_weight.weight(nb_set=nbsets[cn_map], structure_environments=se,
cn_map=cn_map, additional_info=additional_info)
self.assertAlmostEqual(delta_w, 0.0, delta=1e-8)
cn_map = (12, 1)
delta_w = delta_weight.weight(nb_set=nbsets[cn_map], structure_environments=se,
cn_map=cn_map, additional_info=additional_info)
self.assertAlmostEqual(delta_w, 0.0, delta=1e-8)
cn_map = (13, 2)
delta_w = delta_weight.weight(nb_set=nbsets[cn_map], structure_environments=se,
cn_map=cn_map, additional_info=additional_info)
self.assertAlmostEqual(delta_w, 1.0, delta=1e-8)
cn_map = (13, 0)
delta_w = delta_weight.weight(nb_set=nbsets[cn_map], structure_environments=se,
cn_map=cn_map, additional_info=additional_info)
self.assertAlmostEqual(delta_w, 0.0, delta=1e-8)
cn_map = (13, 1)
delta_w = delta_weight.weight(nb_set=nbsets[cn_map], structure_environments=se,
cn_map=cn_map, additional_info=additional_info)
self.assertAlmostEqual(delta_w, 0.0, delta=1e-8)
effective_csm_estimator = {'function': 'power2_inverse_decreasing',
'options': {'max_csm': 8.0}}
weight_estimator = {'function': 'smootherstep',
'options': {'delta_csm_min': -1.0,
'delta_csm_max': 3.0}}
symmetry_measure_type = 'csm_wcs_ctwcc'
delta_weight = DeltaCSMNbSetWeight(effective_csm_estimator=effective_csm_estimator,
weight_estimator=weight_estimator,
symmetry_measure_type=symmetry_measure_type)
additional_info = {}
cn_map = (12, 3)
delta_w = delta_weight.weight(nb_set=nbsets[cn_map], structure_environments=se,
cn_map=cn_map, additional_info=additional_info)
self.assertAlmostEqual(delta_w, 0.040830741048481355, delta=1e-8)
cn_map = (13, 2)
delta_w = delta_weight.weight(nb_set=nbsets[cn_map], structure_environments=se,
cn_map=cn_map, additional_info=additional_info)
self.assertAlmostEqual(delta_w, 1.0, delta=1e-8)
cn_map = (13, 0)
delta_w = delta_weight.weight(nb_set=nbsets[cn_map], structure_environments=se,
cn_map=cn_map, additional_info=additional_info)
self.assertAlmostEqual(delta_w, 0.103515625, delta=1e-8)
cn_map = (13, 1)
delta_w = delta_weight.weight(nb_set=nbsets[cn_map], structure_environments=se,
cn_map=cn_map, additional_info=additional_info)
self.assertAlmostEqual(delta_w, 0.103515625, delta=1e-8)
# Get the StructureEnvironments for SiO2 (mp-7000)
        with open(os.path.join(se_files_dir, 'se_mp-7000.json'), 'r') as f:
            dd = json.load(f)
se = StructureEnvironments.from_dict(dd)
# Get neighbors sets for which we get the weights
cn_maps = [(2, 0), (4, 0)]
nbsets = {cn_map: se.neighbors_sets[6][cn_map[0]][cn_map[1]] for cn_map in cn_maps}
effective_csm_estimator = {'function': 'power2_inverse_decreasing',
'options': {'max_csm': 8.0}}
weight_estimator = {'function': 'smootherstep',
'options': {'delta_csm_min': 0.5,
'delta_csm_max': 3.0}}
symmetry_measure_type = 'csm_wcs_ctwcc'
delta_weight = DeltaCSMNbSetWeight(effective_csm_estimator=effective_csm_estimator,
weight_estimator=weight_estimator,
symmetry_measure_type=symmetry_measure_type)
additional_info = {}
cn_map = (2, 0)
delta_w = delta_weight.weight(nb_set=nbsets[cn_map], structure_environments=se,
cn_map=cn_map, additional_info=additional_info)
self.assertAlmostEqual(delta_w, 0.0, delta=1e-8)
cn_map = (4, 0)
delta_w = delta_weight.weight(nb_set=nbsets[cn_map], structure_environments=se,
cn_map=cn_map, additional_info=additional_info)
self.assertAlmostEqual(delta_w, 1.0, delta=1e-8)
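        # The intermediate values above are plain smootherstep evaluations:
        # with t = (delta_csm - delta_csm_min) / (delta_csm_max - delta_csm_min)
        # clamped to [0, 1], weight = 6*t**5 - 15*t**4 + 10*t**3; for instance
        # 0.103515625 is exactly smootherstep at t = 0.25.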
def test_dist_angle_area_weight(self):
surface_definition = {'type': 'standard_elliptic',
'distance_bounds': {'lower': 1.2, 'upper': 1.8},
'angle_bounds': {'lower': 0.2, 'upper': 0.8}}
da_area_weight = DistanceAngleAreaNbSetWeight(weight_type='has_intersection',
surface_definition=surface_definition,
nb_sets_from_hints='fallback_to_source',
other_nb_sets='0_weight',
additional_condition=DistanceAngleAreaNbSetWeight.AC.ONLY_ACB)
d1, d2, a1, a2 = 1.05, 1.15, 0.05, 0.08
self.assertFalse(da_area_weight.rectangle_crosses_area(d1=d1, d2=d2, a1=a1, a2=a2))
d1, d2, a1, a2 = 1.05, 1.15, 0.1, 0.2
self.assertFalse(da_area_weight.rectangle_crosses_area(d1=d1, d2=d2, a1=a1, a2=a2))
d1, d2, a1, a2 = 1.9, 1.95, 0.1, 0.2
self.assertFalse(da_area_weight.rectangle_crosses_area(d1=d1, d2=d2, a1=a1, a2=a2))
d1, d2, a1, a2 = 1.05, 1.95, 0.05, 0.25
self.assertTrue(da_area_weight.rectangle_crosses_area(d1=d1, d2=d2, a1=a1, a2=a2))
d1, d2, a1, a2 = 1.05, 1.95, 0.75, 0.9
self.assertTrue(da_area_weight.rectangle_crosses_area(d1=d1, d2=d2, a1=a1, a2=a2))
d1, d2, a1, a2 = 1.1, 1.9, 0.1, 0.9
self.assertTrue(da_area_weight.rectangle_crosses_area(d1=d1, d2=d2, a1=a1, a2=a2))
d1, d2, a1, a2 = 1.23, 1.77, 0.48, 0.52
self.assertTrue(da_area_weight.rectangle_crosses_area(d1=d1, d2=d2, a1=a1, a2=a2))
d1, d2, a1, a2 = 1.23, 1.24, 0.48, 0.52
self.assertFalse(da_area_weight.rectangle_crosses_area(d1=d1, d2=d2, a1=a1, a2=a2))
d1, d2, a1, a2 = 1.4, 1.6, 0.4, 0.6
self.assertTrue(da_area_weight.rectangle_crosses_area(d1=d1, d2=d2, a1=a1, a2=a2))
d1, d2, a1, a2 = 1.6, 1.9, 0.7, 0.9
self.assertFalse(da_area_weight.rectangle_crosses_area(d1=d1, d2=d2, a1=a1, a2=a2))
d1, d2, a1, a2 = 1.5, 1.6, 0.75, 0.78
self.assertFalse(da_area_weight.rectangle_crosses_area(d1=d1, d2=d2, a1=a1, a2=a2))
d1, d2, a1, a2 = 1.5, 1.6, 0.75, 0.95
self.assertFalse(da_area_weight.rectangle_crosses_area(d1=d1, d2=d2, a1=a1, a2=a2))
d1, d2, a1, a2 = 1.4, 1.6, 0.1, 0.9
self.assertTrue(da_area_weight.rectangle_crosses_area(d1=d1, d2=d2, a1=a1, a2=a2))
d1, d2, a1, a2 = 1.4, 1.6, 0.3, 0.7
self.assertTrue(da_area_weight.rectangle_crosses_area(d1=d1, d2=d2, a1=a1, a2=a2))
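        # rectangle_crosses_area checks whether the rectangle [d1, d2] x [a1, a2]
        # in (normalized distance, normalized angle) space crosses the boundary
        # of the 'standard_elliptic' surface defined above: rectangles entirely
        # outside the distance/angle bounds, or entirely on one side of the
        # elliptic boundary (e.g. the thin 1.23-1.24 one), do not cross it.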
def test_dist_nb_set_weight(self):
dnbset_weight = DistanceNbSetWeight()
dnbset_weight2 = DistanceNbSetWeight(weight_function={'function': 'smoothstep',
'options': {'lower': 1.2,
'upper': 1.3}})
fake_nb_set1 = FakeNbSet(cn=1)
fake_nb_set1.site_voronoi_indices = {0}
fake_nb_set2 = FakeNbSet(cn=2)
fake_nb_set2.site_voronoi_indices = {0, 1}
fake_nb_set3 = FakeNbSet(cn=3)
fake_nb_set3.site_voronoi_indices = {0, 1, 2}
fake_nb_set4 = FakeNbSet(cn=4)
fake_nb_set4.site_voronoi_indices = {0, 1, 2, 3}
fake_nb_set5 = FakeNbSet(cn=5)
fake_nb_set5.site_voronoi_indices = {0, 1, 2, 3, 4}
fake_nb_set5_m2 = FakeNbSet(cn=4)
fake_nb_set5_m2.site_voronoi_indices = {0, 1, 3, 4}
fake_nb_set6 = FakeNbSet(cn=6)
fake_nb_set6.site_voronoi_indices = {0, 1, 2, 3, 4, 5}
fake_nb_set7 = FakeNbSet(cn=7)
fake_nb_set7.site_voronoi_indices = {0, 1, 2, 3, 4, 5, 6}
fake_nb_set1.isite = 0
fake_nb_set2.isite = 0
fake_nb_set3.isite = 0
fake_nb_set4.isite = 0
fake_nb_set5.isite = 0
fake_nb_set5_m2.isite = 0
fake_nb_set6.isite = 0
fake_nb_set7.isite = 0
dummy_se = DummyStructureEnvironments()
dummy_se.neighbors_sets = []
dummy_se.neighbors_sets.append({})
dummy_se.neighbors_sets[0][1] = [fake_nb_set1]
dummy_se.neighbors_sets[0][2] = [fake_nb_set2]
dummy_se.neighbors_sets[0][3] = [fake_nb_set3]
dummy_se.neighbors_sets[0][4] = [fake_nb_set4, fake_nb_set5_m2]
dummy_se.neighbors_sets[0][5] = [fake_nb_set5]
dummy_se.neighbors_sets[0][6] = [fake_nb_set6]
dummy_se.neighbors_sets[0][7] = [fake_nb_set7]
dummy_voronoi = DummyVoronoiContainer()
dummy_voronoi.voronoi_list2 = []
dummy_voronoi.voronoi_list2.append([])
dummy_voronoi.voronoi_list2[0].append({'normalized_distance': 1.0}) # 0
dummy_voronoi.voronoi_list2[0].append({'normalized_distance': 1.2}) # 1
dummy_voronoi.voronoi_list2[0].append({'normalized_distance': 1.225}) # 2
dummy_voronoi.voronoi_list2[0].append({'normalized_distance': 1.25}) # 3
dummy_voronoi.voronoi_list2[0].append({'normalized_distance': 1.275}) # 4
dummy_voronoi.voronoi_list2[0].append({'normalized_distance': 1.3}) # 5
dummy_voronoi.voronoi_list2[0].append({'normalized_distance': 1.8}) # 6
        # The following fake neighbor dict is not in any of the neighbors sets
dummy_voronoi.voronoi_list2[0].append({'normalized_distance': 1.55}) # 7
for fake_nb_set in [fake_nb_set1, fake_nb_set2, fake_nb_set3, fake_nb_set4, fake_nb_set5, fake_nb_set5_m2,
fake_nb_set6, fake_nb_set7]:
fake_nb_set.normalized_distances = [dummy_voronoi.voronoi_list2[0][ivoro_nb]['normalized_distance']
for ivoro_nb in fake_nb_set.site_voronoi_indices]
dummy_se.voronoi = dummy_voronoi
cn_map1 = (1, 0)
cn_map2 = (2, 0)
cn_map3 = (3, 0)
cn_map4 = (4, 0)
cn_map5 = (5, 0)
cn_map5_m2 = (4, 1)
cn_map6 = (6, 0)
cn_map7 = (7, 0)
myweight1 = dnbset_weight.weight(fake_nb_set1, dummy_se, cn_map=cn_map1, additional_info=None)
self.assertAlmostEqual(myweight1, 0.0, delta=1e-8)
myweight2 = dnbset_weight.weight(fake_nb_set2, dummy_se, cn_map=cn_map2, additional_info=None)
self.assertAlmostEqual(myweight2, 0.103515625, delta=1e-8)
myweight3 = dnbset_weight.weight(fake_nb_set3, dummy_se, cn_map=cn_map3, additional_info=None)
self.assertAlmostEqual(myweight3, 0.5, delta=1e-8)
myweight4 = dnbset_weight.weight(fake_nb_set4, dummy_se, cn_map=cn_map4, additional_info=None)
self.assertAlmostEqual(myweight4, 0.896484375, delta=1e-8)
myweight5 = dnbset_weight.weight(fake_nb_set5, dummy_se, cn_map=cn_map5, additional_info=None)
self.assertAlmostEqual(myweight5, 1.0, delta=1e-8)
myweight5_m2 = dnbset_weight.weight(fake_nb_set5_m2, dummy_se, cn_map=cn_map5_m2, additional_info=None)
self.assertAlmostEqual(myweight5_m2, 0.103515625, delta=1e-8)
myweight7 = dnbset_weight.weight(fake_nb_set7, dummy_se, cn_map=cn_map7, additional_info=None)
self.assertAlmostEqual(myweight7, 1.0, delta=1e-8)
myweight_2_3 = dnbset_weight2.weight(fake_nb_set3, dummy_se, cn_map=cn_map3, additional_info=None)
self.assertAlmostEqual(myweight_2_3, 0.5, delta=1e-8)
myweight_2_4 = dnbset_weight2.weight(fake_nb_set4, dummy_se, cn_map=cn_map4, additional_info=None)
self.assertAlmostEqual(myweight_2_4, 0.84375, delta=1e-8)
myweight_2_2 = dnbset_weight2.weight(fake_nb_set2, dummy_se, cn_map=cn_map2, additional_info=None)
self.assertAlmostEqual(myweight_2_2, 0.15625, delta=1e-8)
dnbset_weight3 = DistanceNbSetWeight(weight_function={'function': 'smoothstep',
'options': {'lower': 1.5,
'upper': 1.7}},
nbs_source='nb_sets')
dnbset_weight4 = DistanceNbSetWeight(weight_function={'function': 'smoothstep',
'options': {'lower': 1.5,
'upper': 1.7}},
nbs_source='voronoi')
myweight_3_6 = dnbset_weight3.weight(fake_nb_set6, dummy_se, cn_map=cn_map6, additional_info=None)
self.assertAlmostEqual(myweight_3_6, 1.0, delta=1e-8)
myweight_4_6 = dnbset_weight4.weight(fake_nb_set6, dummy_se, cn_map=cn_map6, additional_info=None)
self.assertAlmostEqual(myweight_4_6, 0.15625, delta=1e-8)
deltadnbset_weight = DeltaDistanceNbSetWeight(weight_function={'function': 'smootherstep',
'options': {'lower': 0.05,
'upper': 0.15}})
myweightdelta1 = deltadnbset_weight.weight(fake_nb_set1, dummy_se, cn_map=cn_map1, additional_info=None)
self.assertAlmostEqual(myweightdelta1, 1.0, delta=1e-8)
myweightdelta2 = deltadnbset_weight.weight(fake_nb_set2, dummy_se, cn_map=cn_map2, additional_info=None)
self.assertAlmostEqual(myweightdelta2, 0.0, delta=1e-8)
myweightdelta3 = deltadnbset_weight.weight(fake_nb_set3, dummy_se, cn_map=cn_map3, additional_info=None)
self.assertAlmostEqual(myweightdelta3, 0.0, delta=1e-8)
deltadnbset_weight2 = DeltaDistanceNbSetWeight(weight_function={'function': 'smootherstep',
'options': {'lower': 0.1,
'upper': 0.3}})
myweightdelta1 = deltadnbset_weight2.weight(fake_nb_set1, dummy_se, cn_map=cn_map1, additional_info=None)
self.assertAlmostEqual(myweightdelta1, 0.5, delta=1e-8)
myweightdelta2 = deltadnbset_weight2.weight(fake_nb_set2, dummy_se, cn_map=cn_map2, additional_info=None)
self.assertAlmostEqual(myweightdelta2, 0.0, delta=1e-8)
myweightdelta3 = deltadnbset_weight2.weight(fake_nb_set3, dummy_se, cn_map=cn_map3, additional_info=None)
self.assertAlmostEqual(myweightdelta3, 0.0, delta=1e-8)
deltadnbset_weight3 = DeltaDistanceNbSetWeight(weight_function={'function': 'smoothstep',
'options': {'lower': 0.1,
'upper': 0.5}})
myweightdelta1 = deltadnbset_weight3.weight(fake_nb_set1, dummy_se, cn_map=cn_map1, additional_info=None)
self.assertAlmostEqual(myweightdelta1, 0.15625, delta=1e-8)
myweightdelta6 = deltadnbset_weight3.weight(fake_nb_set6, dummy_se, cn_map=cn_map6, additional_info=None)
self.assertAlmostEqual(myweightdelta6, 0.31640625, delta=1e-8)
deltadnbset_weight4 = DeltaDistanceNbSetWeight(weight_function={'function': 'smoothstep',
'options': {'lower': 0.1,
'upper': 0.5}},
nbs_source='nb_sets')
myweightdelta1 = deltadnbset_weight4.weight(fake_nb_set1, dummy_se, cn_map=cn_map1, additional_info=None)
self.assertAlmostEqual(myweightdelta1, 0.15625, delta=1e-8)
myweightdelta6 = deltadnbset_weight4.weight(fake_nb_set6, dummy_se, cn_map=cn_map6, additional_info=None)
self.assertAlmostEqual(myweightdelta6, 1.0, delta=1e-8)
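        # For reference, the step functions used by the distance-based weights,
        # after rescaling x into [0, 1] between 'lower' and 'upper', are:
        #   smoothstep:   3*x**2 - 2*x**3             (0.15625 at x = 0.25)
        #   smootherstep: 6*x**5 - 15*x**4 + 10*x**3  (0.103515625 at x = 0.25)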
if __name__ == "__main__":
unittest.main()
|
dongsenfo/pymatgen
|
pymatgen/analysis/chemenv/coordination_environments/tests/test_weights.py
|
Python
|
mit
| 34,458
|
[
"pymatgen"
] |
cda54eab77e1a84980c253c14c0318062f1eaa331faa3692816bfe15e383a049
|
##
# Copyright 2009-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing ALADIN, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import fileinput
import os
import re
import shutil
import sys
import tempfile
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import apply_regex_substitutions, mkdir
from easybuild.tools.modules import get_software_root, get_software_libdir
from easybuild.tools.ordereddict import OrderedDict
from easybuild.tools.run import run_cmd, run_cmd_qa
class EB_ALADIN(EasyBlock):
"""Support for building/installing ALADIN."""
def __init__(self, *args, **kwargs):
"""Initialisation of custom class variables for ALADIN."""
super(EB_ALADIN, self).__init__(*args, **kwargs)
self.conf_file = None
self.conf_filepath = None
self.rootpack_dir = 'UNKNOWN'
self.orig_library_path = None
@staticmethod
def extra_options():
"""Custom easyconfig parameters for ALADIN."""
extra_vars = {
'optional_extra_param': ['default value', "short description", CUSTOM],
}
return EasyBlock.extra_options(extra_vars)
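        # A hypothetical easyconfig would set the custom parameter defined above
        # as a regular top-level assignment, e.g.:
        #   optional_extra_param = 'some value'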
def configure_step(self):
"""Custom configuration procedure for ALADIN."""
# unset $LIBRARY_PATH set by modules of dependencies, because it may screw up linking
if 'LIBRARY_PATH' in os.environ:
self.log.debug("Unsetting $LIBRARY_PATH (was: %s)" % os.environ['LIBRARY_PATH'])
self.orig_library_path = os.environ.pop('LIBRARY_PATH')
# build auxiliary libraries
auxlibs_dir = None
my_gnu = None
if self.toolchain.comp_family() == toolchain.GCC:
my_gnu = 'y' # gfortran
for var in ['CFLAGS', 'CXXFLAGS', 'F90FLAGS', 'FFLAGS']:
                flags = os.getenv(var) or ''  # guard against an unset flags variable
                env.setvar(var, "%s -fdefault-real-8 -fdefault-double-8" % flags)
self.log.info("Updated %s to '%s'" % (var, os.getenv(var)))
elif self.toolchain.comp_family() == toolchain.INTELCOMP:
my_gnu = 'i' # icc/ifort
else:
raise EasyBuildError("Don't know how to set 'my_gnu' variable in auxlibs build script.")
self.log.info("my_gnu set to '%s'" % my_gnu)
tmp_installroot = tempfile.mkdtemp(prefix='aladin_auxlibs_')
try:
cwd = os.getcwd()
os.chdir(self.builddir)
builddirs = os.listdir(self.builddir)
auxlibs_dir = [x for x in builddirs if x.startswith('auxlibs_installer')][0]
os.chdir(auxlibs_dir)
auto_driver = 'driver_automatic'
for line in fileinput.input(auto_driver, inplace=1, backup='.orig.eb'):
line = re.sub(r"^(my_gnu\s*=\s*).*$", r"\1%s" % my_gnu, line)
line = re.sub(r"^(my_r32\s*=\s*).*$", r"\1n", line) # always 64-bit real precision
line = re.sub(r"^(my_readonly\s*=\s*).*$", r"\1y", line) # make libs read-only after build
line = re.sub(r"^(my_installroot\s*=\s*).*$", r"\1%s" % tmp_installroot, line)
sys.stdout.write(line)
run_cmd("./%s" % auto_driver)
os.chdir(cwd)
        except OSError as err:
raise EasyBuildError("Failed to build ALADIN: %s", err)
# build gmkpack, update PATH and set GMKROOT
        # we build gmkpack here because a config file is generated in the gmkpack install path
try:
gmkpack_dir = [x for x in builddirs if x.startswith('gmkpack')][0]
os.chdir(os.path.join(self.builddir, gmkpack_dir))
qa = {
'Do you want to run the configuration file maker assistant now (y) or later [n] ?': 'n',
}
run_cmd_qa("./build_gmkpack", qa)
os.chdir(cwd)
paths = os.getenv('PATH').split(':')
paths.append(os.path.join(self.builddir, gmkpack_dir, 'util'))
env.setvar('PATH', ':'.join(paths))
env.setvar('GMKROOT', os.path.join(self.builddir, gmkpack_dir))
        except OSError as err:
raise EasyBuildError("Failed to build gmkpack: %s", err)
# generate gmkpack configuration file
self.conf_file = 'ALADIN_%s' % self.version
self.conf_filepath = os.path.join(self.builddir, 'gmkpack_support', 'arch', '%s.x' % self.conf_file)
try:
if os.path.exists(self.conf_filepath):
os.remove(self.conf_filepath)
self.log.info("Removed existing gmpack config file %s" % self.conf_filepath)
archdir = os.path.dirname(self.conf_filepath)
if not os.path.exists(archdir):
mkdir(archdir, parents=True)
        except OSError as err:
            raise EasyBuildError("Failed to prepare gmkpack config file %s: %s", self.conf_filepath, err)
mpich = 'n'
known_mpi_libs = [toolchain.MPICH, toolchain.MPICH2, toolchain.INTELMPI]
if self.toolchain.options.get('usempi', None) and self.toolchain.mpi_family() in known_mpi_libs:
mpich = 'y'
qpref = 'Please type the ABSOLUTE name of '
qsuff = ', or ignore (environment variables allowed) :'
qsuff2 = ', or ignore : (environment variables allowed) :'
comp_fam = self.toolchain.comp_family()
if comp_fam == toolchain.GCC:
gribdir = 'GNU'
elif comp_fam == toolchain.INTELCOMP:
gribdir = 'INTEL'
else:
raise EasyBuildError("Don't know which grib lib dir to use for compiler %s", comp_fam)
aux_lib_gribex = os.path.join(tmp_installroot, gribdir, 'lib', 'libgribex.a')
aux_lib_ibm = os.path.join(tmp_installroot, gribdir, 'lib', 'libibmdummy.a')
grib_api_lib = os.path.join(get_software_root('grib_api'), 'lib', 'libgrib_api.a')
grib_api_f90_lib = os.path.join(get_software_root('grib_api'), 'lib', 'libgrib_api_f90.a')
grib_api_inc = os.path.join(get_software_root('grib_api'), 'include')
jasperlib = os.path.join(get_software_root('JasPer'), 'lib', 'libjasper.a')
mpilib = os.path.join(os.getenv('MPI_LIB_DIR'), os.getenv('MPI_LIB_SHARED'))
# netCDF
netcdf = get_software_root('netCDF')
netcdf_fortran = get_software_root('netCDF-Fortran')
if netcdf:
netcdfinc = os.path.join(netcdf, 'include')
if netcdf_fortran:
netcdflib = os.path.join(netcdf_fortran, get_software_libdir('netCDF-Fortran'), 'libnetcdff.a')
else:
netcdflib = os.path.join(netcdf, get_software_libdir('netCDF'), 'libnetcdff.a')
if not os.path.exists(netcdflib):
raise EasyBuildError("%s does not exist", netcdflib)
else:
raise EasyBuildError("netCDF(-Fortran) not available")
ldpaths = [ldflag[2:] for ldflag in os.getenv('LDFLAGS').split(' ')] # LDFLAGS have form '-L/path/to'
lapacklibs = []
for lib in os.getenv('LAPACK_STATIC_LIBS').split(','):
libpaths = [os.path.join(ldpath, lib) for ldpath in ldpaths]
lapacklibs.append([libpath for libpath in libpaths if os.path.exists(libpath)][0])
lapacklib = ' '.join(lapacklibs)
blaslibs = []
for lib in os.getenv('BLAS_STATIC_LIBS').split(','):
libpaths = [os.path.join(ldpath, lib) for ldpath in ldpaths]
blaslibs.append([libpath for libpath in libpaths if os.path.exists(libpath)][0])
blaslib = ' '.join(blaslibs)
qa = {
'Do you want to run the configuration file maker assistant now (y) or later [n] ?': 'y',
'Do you want to setup your configuration file for MPICH (y/n) [n] ?': mpich,
'Please type the directory name where to find a dummy file mpif.h or ignore :': os.getenv('MPI_INC_DIR'),
'%sthe library gribex or emos%s' % (qpref, qsuff2): aux_lib_gribex,
'%sthe library ibm%s' % (qpref, qsuff): aux_lib_ibm,
'%sthe library grib_api%s' % (qpref, qsuff): grib_api_lib,
'%sthe library grib_api_f90%s' % (qpref, qsuff): grib_api_f90_lib,
'%sthe JPEG auxilary library if enabled by Grib_api%s' % (qpref, qsuff2): jasperlib,
'%sthe library netcdf%s' % (qpref, qsuff): netcdflib,
'%sthe library lapack%s' % (qpref, qsuff): lapacklib,
'%sthe library blas%s' % (qpref, qsuff): blaslib,
'%sthe library mpi%s' % (qpref, qsuff): mpilib,
'%sa MPI dummy library for serial executions, or ignore :' % qpref: '',
'Please type the directory name where to find grib_api headers, or ignore :': grib_api_inc,
'Please type the directory name where to find fortint.h or ignore :': '',
'Please type the directory name where to find netcdf headers, or ignore :': netcdfinc,
'Do you want to define CANARI (y/n) [y] ?': 'y',
'Please type the name of the script file used to generate a preprocessed blacklist file, or ignore :': '',
'Please type the name of the script file used to recover local libraries (gget), or ignore :': '',
'Please type the options to tune the gnu compilers, or ignore :': os.getenv('F90FLAGS'),
}
f90_seq = os.getenv('F90_SEQ')
if not f90_seq:
# F90_SEQ is only defined when usempi is enabled
f90_seq = os.getenv('F90')
stdqa = OrderedDict([
(r'Confirm library .* is .*', 'y'), # this one needs to be tried first!
(r'.*fortran 90 compiler name .*\s*:\n\(suggestions\s*: .*\)', os.getenv('F90')),
(r'.*fortran 90 compiler interfaced with .*\s*:\n\(suggestions\s*: .*\)', f90_seq),
(r'Please type the ABSOLUTE name of .*library.*, or ignore\s*[:]*\s*[\n]*.*', ''),
(r'Please .* to save this draft configuration file :\n.*', '%s.x' % self.conf_file),
])
no_qa = [
".*ignored.",
]
env.setvar('GMKTMP', self.builddir)
env.setvar('GMKFILE', self.conf_file)
run_cmd_qa("gmkfilemaker", qa, std_qa=stdqa, no_qa=no_qa)
# set environment variables for installation dirs
env.setvar('ROOTPACK', os.path.join(self.installdir, 'rootpack'))
env.setvar('ROOTBIN', os.path.join(self.installdir, 'rootpack'))
env.setvar('HOMEPACK', os.path.join(self.installdir, 'pack'))
env.setvar('HOMEBIN', os.path.join(self.installdir, 'pack'))
# patch config file to include right Fortran compiler flags
regex_subs = [(r"^(FRTFLAGS\s*=.*)$", r"\1 %s" % os.getenv('FFLAGS'))]
apply_regex_substitutions(self.conf_filepath, regex_subs)
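        # e.g. (hypothetical file contents) a line 'FRTFLAGS = -fpic' in the
        # generated config file becomes 'FRTFLAGS = -fpic <contents of $FFLAGS>',
        # so gmkpack builds pick up the toolchain's Fortran flags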
def build_step(self):
"""No separate build procedure for ALADIN (see install_step)."""
pass
def test_step(self):
"""Custom built-in test procedure for ALADIN."""
if self.cfg['runtest']:
cmd = "test-command"
run_cmd(cmd, simple=True, log_all=True, log_output=True)
def install_step(self):
"""Custom install procedure for ALADIN."""
try:
mkdir(os.getenv('ROOTPACK'), parents=True)
mkdir(os.getenv('HOMEPACK'), parents=True)
        except OSError as err:
            raise EasyBuildError("Failed to create rootpack/homepack dirs in %s: %s", self.installdir, err)
# create rootpack
[v1, v2] = self.version.split('_')
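        # self.version is expected to look like '<release>_<branch>'
        # (hypothetical example: '36t1_bf03', giving gmkpack -r 36t1 -b bf03)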
(out, _) = run_cmd("source $GMKROOT/util/berootpack && gmkpack -p master -a -r %s -b %s" % (v1, v2), simple=False)
        packdir_regexp = re.compile(r"Creating main pack (.*) \.\.\.")
res = packdir_regexp.search(out)
if res:
self.rootpack_dir = os.path.join('rootpack', res.group(1))
else:
raise EasyBuildError("Failed to determine rootpack dir.")
# copy ALADIN sources to right directory
try:
src_dirs = [d for d in os.listdir(self.builddir) if not (d.startswith('auxlib') or d.startswith('gmk'))]
target = os.path.join(self.installdir, self.rootpack_dir, 'src', 'local')
self.log.info("Copying sources from %s to %s" % (self.builddir, target))
for srcdir in src_dirs:
shutil.copytree(os.path.join(self.builddir, srcdir), os.path.join(target, srcdir))
self.log.info("Copied %s" % srcdir)
        except OSError as err:
raise EasyBuildError("Failed to copy ALADIN sources: %s", err)
if self.cfg['parallel']:
env.setvar('GMK_THREADS', str(self.cfg['parallel']))
# build rootpack
run_cmd(os.path.join(self.installdir, self.rootpack_dir, 'ics_master'))
# restore original $LIBRARY_PATH
if self.orig_library_path is not None:
os.environ['LIBRARY_PATH'] = self.orig_library_path
def sanity_check_step(self):
"""Custom sanity check for ALADIN."""
bindir = os.path.join(self.rootpack_dir, 'bin')
libdir = os.path.join(self.rootpack_dir, 'lib')
custom_paths = {
'files': [os.path.join(bindir, x) for x in ['MASTER']] +
[os.path.join(libdir, 'lib%s.local.a' % x) for x in ['aeo', 'ald', 'arp', 'bip',
'bla', 'mpa', 'mse', 'obt',
'odb', 'sat', 'scr', 'sct',
'sur', 'surfex', 'tal', 'tfl',
'uti', 'xla', 'xrd']],
'dirs': [],
}
super(EB_ALADIN, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
"""Custom guesses for environment variables (PATH, ...) for ALADIN."""
guesses = super(EB_ALADIN, self).make_module_req_guess()
guesses.update({
'PATH': [os.path.join(self.rootpack_dir, 'bin')],
})
return guesses
|
hpcleuven/easybuild-easyblocks
|
easybuild/easyblocks/a/aladin.py
|
Python
|
gpl-2.0
| 15,321
|
[
"NetCDF"
] |
2e03fdb516e2c9576d2d335e69b0c2dbf5395ce2e68cbce03316c30f334ad969
|
"""
Copyright (C) <2010> Autin L.
This file ePMV_git/epmvGui.py is part of ePMV.
ePMV is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ePMV is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ePMV. If not, see <http://www.gnu.org/licenses/gpl-3.0.html>.
"""
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 13 08:38:56 2010
@author: Ludovic Autin - ludovic.autin@gmail.com
"""
# import DejaVu
# DejaVu.enableVertexArray = False
# TODO change all windows size
import os, sys
import upy
# uiadaptor = upy.getUIClass()
from upy import uiadaptor
from ePMV import comput_util as C
import ePMV
from MolKit.molecule import AtomSet
from MolKit.protein import ResidueSetSelector, Chain, Protein, Residue
from ePMV import comput_util as util
# Python 2.x vs 3.x check: no Python 2.8 was ever released, so this is True for any 2.x
if sys.version_info < (2, 8):
import ePMV.pmv_dev.APBSCommands_2x as APBS
else:
import ePMV.pmv_dev.APBSCommands as APBS
APBSgui = APBS.APBSgui
from ePMV.pmv_dev.buildDNAGui import BuildDNAGui
from ePMV.register_epmv import Register_User_ePMV_ui
from time import time
class BindGeomToMol(uiadaptor):
    # the save dialog doesn't work
def setup(self, epmv, id=None):
self.subdialog = True
self.block = True
self.epmv = epmv
self.title = "Bind Polygon to Mol" # +self.mol.name
self.SetTitle(self.title)
        width = 350
self.h = 130
self.w = 300
        if id is None:
            id = self.bid
        self.id = id
# define the widget here too
self.BTN = {}
# need a filename + browse button
self.BTN["bind"] = self._addElemt(label="Polygon Name",
width=40, height=10)
self._polyname = self.addVariable("str", "")
self.NAME = self._addElemt(name="polyname", action=None, width=100,
value="", type="inputStr", variable=self._polyname)
self.BTN["ok"] = self._addElemt(name="Ok", width=40, height=10,
action=self.bind, type="button")
self.BTN["cancel"] = self._addElemt(name="Cancel", width=40, height=10,
action=self.close, type="button")
self.LABEL = self._addElemt(label="Bind Polygon to current molecule", width=100)
self.setupLayout()
def setupLayout(self):
# form layout for each SS types ?
self._layout = []
self._layout.append([self.LABEL, ])
self._layout.append([self.BTN["bind"], self.NAME])
self._layout.append([self.BTN["ok"], self.BTN["cancel"]])
def bind(self, *args):
# get the object name
name = self.getString(self.NAME)
self.bind_cb(name)
self.epmv.gui.updateCGeomList()
        self.drawMessage(message=name + " bound to " + self.mol.name + "!\n")
self.close()
def bind_cb(self, name, ):
self.mol = self.epmv.gui.current_mol
# get the object name
# name = self.getString(self.NAME)
        # rule for bound geometry names: b_<name>
        obj = self.epmv.helper.getObject(name)
        # check if it already exists in the geomContainer (under its "b_" name)
        if "b_" + name in self.mol.geomContainer.geoms:
            g = self.mol.geomContainer.geoms["b_" + name]
else:
# create the indexed polygon
from DejaVu.IndexedPolygons import IndexedPolygons
g = IndexedPolygons(name="b_" + name)
# update g
mesh = obj # self.epmv.helper.getMesh(obj)
g.Set(vertices=self.epmv.helper.getMeshVertices(mesh),
faces=self.epmv.helper.getMeshFaces(mesh))
# do the binding
self.epmv.mv.bindGeomToMolecularFragment(g, self.mol.allAtoms)
self.epmv._addObjToGeom([obj, ], g)
# add it to the main combo_box if not already there
def CreateLayout(self):
self._createLayout()
# self.restorePreferences()
return True
def Command(self, *args):
# print args
self._command(args)
return True
class ParameterModeller(uiadaptor):
def setup(self, epmv, id=1005):
self.subdialog = True
self.block = True # special type for blender
self.title = "Modeller"
self.SetTitle(self.title)
self.epmv = epmv
        width = 200
self.h = 350
self.w = 300
        if id is None:
            id = self.bid
self.id = id
self.LABEL_ID = {}
self.NUMBERS = {
"miniIterMax": self._addElemt(name="Max iteration", width=50, height=10,
action=None, type="inputInt",
icon=None,
value=1000,
variable=self.addVariable("int", 1000),
mini=0, maxi=100000, step=1),
"mdIterMax": self._addElemt(name="Max iteration", width=50, height=10,
action=None, type="inputInt",
icon=None,
value=1000,
variable=self.addVariable("int", 1000),
mini=0, maxi=100000, step=1),
"mdTemp": self._addElemt(name="Temperature", width=50, height=10,
action=None, type="inputInt",
icon=None,
value=300,
variable=self.addVariable("int", 300),
mini=-100, maxi=1000, step=1),
"rtstep": self._addElemt(name="number of steps", width=50, height=10,
action=self.setRealTimeStep,
type="inputInt",
icon=None,
value=2,
variable=self.addVariable("int", 2),
mini=0, maxi=100, step=1)
}
self.LABEL_ID["miniIterMax"] = self._addElemt(
label=self.NUMBERS["miniIterMax"]["name"],
width=80)
self.LABEL_ID["mdIterMax"] = self._addElemt(
label=self.NUMBERS["mdIterMax"]["name"],
width=80)
self.LABEL_ID["mdTemp"] = self._addElemt(
label=self.NUMBERS["mdTemp"]["name"],
width=80)
self.LABEL_ID["rtstep"] = self._addElemt(
label=self.NUMBERS["rtstep"]["name"],
width=80)
self.BTN = {
"mini": self._addElemt(name="Minimize", width=50, height=10,
label='Minimize options',
action=self.epmv.gui.modellerOptimize, type="button"),
"md": self._addElemt(name="run MD", width=50, height=10,
label='Molecular Dynamic options',
action=self.epmv.gui.modellerMD, type="button"),
"cancel": self._addElemt(name="Close", width=50, height=10,
action=self.cancel, type="button"),
"update coordinate": self._addElemt(name="Update coordinates", width=100, height=10,
action=self.updateCoord, type="button")
}
self.LABEL_ID["mini"] = self._addElemt(
label=self.BTN["mini"]["label"],
width=80)
self.LABEL_ID["md"] = self._addElemt(
label=self.BTN["md"]["label"],
width=80)
self.CHECKBOXS = {
"store": self._addElemt(name="store", width=100, height=10,
action=self.setStoring, type="checkbox", icon=None,
variable=self.addVariable("int", 0)),
"real-time": self._addElemt(name="real-time", width=100, height=10,
action=self.setRealTime, type="checkbox", icon=None,
variable=self.addVariable("int", 0)),
'moddisplay': self._addElemt(name="moddisplay", width=100, height=10,
action=None, type="checkbox", icon=None,
variable=self.addVariable("int", 0))
}
self.rtType = ["mini", "md"]
self.sObject = ["cpk", "lines", "bones", "spline"]
self.COMB_BOX = {"sobject": self._addElemt(name="Object",
value=self.sObject,
width=60, height=10, action=self.setObjectSynchrone,
variable=self.addVariable("int", 0),
type="pullMenu", ),
"rtType": self._addElemt(name="rtType",
value=self.rtType,
                                                  width=60, height=10, action=self.setRTOptimizeType,
variable=self.addVariable("int", 0),
type="pullMenu", )}
self.setupLayout()
return True
def setupLayout(self):
self._layout = []
        # Minimisation
elemFrame = []
elemFrame.append([self.LABEL_ID["miniIterMax"], self.NUMBERS["miniIterMax"]])
elemFrame.append([self.BTN["mini"], ])
frame = self._addLayout(name="Minimization", elems=elemFrame, collapse=False)
self._layout.append(frame)
# MD
elemFrame = []
elemFrame.append([self.LABEL_ID["mdIterMax"], self.NUMBERS["mdIterMax"]])
elemFrame.append([self.LABEL_ID["mdTemp"], self.NUMBERS["mdTemp"]])
elemFrame.append([self.BTN["md"], ])
frame = self._addLayout(name="Molecular Dynamics", elems=elemFrame, collapse=False)
self._layout.append(frame)
# RealTime
elemFrame = []
elemFrame.append([self.CHECKBOXS["real-time"], self.COMB_BOX["rtType"]])
elemFrame.append([self.LABEL_ID["rtstep"], self.NUMBERS["rtstep"]])
elemFrame.append([self.BTN["update coordinate"], self.COMB_BOX["sobject"]])
frame = self._addLayout(name="Real-Time", elems=elemFrame, collapse=False)
self._layout.append(frame)
# general options
self._layout.append([self.CHECKBOXS["store"], self.CHECKBOXS["moddisplay"]])
# self._layout.append([self.CHECKBOXS["real-time"],self.COMB_BOX["rtType"]])
# self._layout.append([self.LABEL_ID["rtstep"],self.NUMBERS["rtstep"]])
# self._layout.append([self.BTN["update coordinate"],self.COMB_BOX["sobject"]])
# self._layout.append([self.LABEL_ID["mini"],])
# self._layout.append([self.LABEL_ID["miniIterMax"],self.NUMBERS["miniIterMax"]])
# self._layout.append([self.BTN["mini"],])
# self._layout.append([self.LABEL_ID["md"],])
# self._layout.append([self.LABEL_ID["mdIterMax"],self.NUMBERS["mdIterMax"]])
# self._layout.append([self.LABEL_ID["mdTemp"],self.NUMBERS["mdTemp"]])
# self._layout.append([self.BTN["md"],])
self._layout.append([self.BTN["cancel"], ])
# return True
def CreateLayout(self):
self._createLayout()
# self.restorePreferences()
return True
    # override the real-time checkbox action
def setRealTime(self, *args):
if hasattr(self.epmv.gui, 'current_mol'):
mol = self.epmv.gui.current_mol
mol.pmvaction.temp = self.getLong(self.NUMBERS["mdTemp"])
mol.pmvaction.realtime = self.getBool(self.CHECKBOXS['real-time'])
def setRealTimeStep(self, *args):
if hasattr(self.epmv.gui, 'current_mol'):
mol = self.epmv.gui.current_mol
mol.pmvaction.mdstep = self.getLong(self.NUMBERS['rtstep'])
def setStoring(self, *args):
if hasattr(self.epmv.gui, 'current_mol'):
mol = self.epmv.gui.current_mol
mol.pmvaction.store = self.getBool(self.CHECKBOXS['store'])
def setObjectSynchrone(self, *args):
if hasattr(self.epmv.gui, 'current_mol'):
mol = self.epmv.gui.current_mol
mol.pmvaction.sObject = self.sObject[self.getLong(self.COMB_BOX["sobject"])]
    def setRTOptimizeType(self, *args):
if hasattr(self.epmv.gui, 'current_mol'):
mol = self.epmv.gui.current_mol
rtype = self.rtType[self.getLong(self.COMB_BOX["rtType"])]
if mol.pmvaction.rtType != rtype:
mol.pmvaction.rtType = rtype
                # need to update the optimizer, i.e. delete it and create a new one
mol.pmvaction.resetOptimizer()
def updateCoord(self, *args):
if hasattr(self.epmv, 'gui'):
mol = self.epmv.gui.current_mol
if hasattr(mol, 'pmvaction'):
self.epmv.updateMolAtomCoord(mol, mol.pmvaction.idConf, types=mol.pmvaction.sObject)
mol.pmvaction.updateModellerCoord(mol.pmvaction.idConf, mol.mdl)
def doIt(self, *args):
# print args
pass
def cancel(self, *args):
self.close()
def Command(self, *args):
# print args
self._command(args)
return True
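# Sketch of the intended real-time flow, based on the callbacks above: the user
# enables 'real-time' and picks 'mini' or 'md' in the rtType menu; the
# per-molecule pmvaction object is then assumed to re-run the Modeller
# optimizer every 'rtstep' steps, storing conformations when 'store' is set.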
class ParameterScoringGui(uiadaptor):
_scorer = 'ad3Score'
_display = False
label = None
def setup(self, epmv, id=1005):
self.subdialog = True
self.block = True # special type for blender
self.title = "PyAutodock"
self.SetTitle(self.title)
self.epmv = epmv
self.h = 320
self.w = 220
        width = 350
        if id is None:
            id = self.bid
self.BTN = {"rec": self._addElemt(id=id, name="Browse", width=50, height=10,
action=None, type="button"),
"lig": self._addElemt(id=id + 1, name="Browse", width=50, height=10,
action=None, type="button"),
"ok": self._addElemt(id=id + 2, name="Add Scorer", width=100, height=10,
action=self.setupScoring, type="button"),
"gScore": self._addElemt(id=id + 3, name="Get Score", width=100, height=10,
action=self.getScore, type="button"),
"cancel": self._addElemt(id=id + 4, name="Close", width=100, height=10,
action=self.cancel, type="button")
}
id = id + len(self.BTN)
self.LABEL_ID = {"rec": self._addElemt(id=id, label="Receptor", width=100, height=10),
"lig": self._addElemt(id=id + 1, label="Ligand", width=100, height=10),
"score": self._addElemt(id=id + 2, label="Type of score", width=100, height=10),
"scorer": self._addElemt(id=id + 2, label="Available scorer", width=100, height=10),
}
id = id + len(self.LABEL_ID)
# txt input
self.TXT = {"rec": self._addElemt(id=id, name="Receptor", action=None, width=100,
value="hsg1:::;", type="inputStr",
variable=self.addVariable("str", "hsg1:::;")),
"lig": self._addElemt(id=id + 1, name="Ligand", action=None, width=100,
value="ind:::;", type="inputStr",
variable=self.addVariable("str", "ind:::;"))
}
id = id + len(self.TXT)
self.scorertype = ['PyPairWise', 'ad3Score', 'ad4Score']
if C.cAD: # cAutodock is available
self.scorertype.append('c_ad3Score')
self.scorertype.append('PairWise')
self.scoreravailable = self.getScorerAvailable()
self.COMB_BOX = {"score": self._addElemt(id=id, name="Type of score",
value=self.scorertype,
width=60, height=10, action=self.setScorer,
variable=self.addVariable("int", 0),
type="pullMenu", ),
"scorer": self._addElemt(id=id + 1, name="Available scorer",
value=self.scoreravailable,
width=60, height=10, action=self.setCurrentScorer,
variable=self.addVariable("int", 0),
type="pullMenu", ),
}
id = id + len(self.COMB_BOX)
self.CHECKBOXS = {"store": self._addElemt(id=id, name="Store",
width=100, height=10,
action=None, type="checkbox", icon=None,
variable=self.addVariable("int", 0)),
"displayLabel": self._addElemt(id=id + 1, name="Display Label",
width=100, height=10,
action=self.toggleDisplay, type="checkbox", icon=None,
variable=self.addVariable("int", 0)),
"colorRec": self._addElemt(id=id + 2, name="Color Rec",
width=100, height=10,
action=self.toggleColor, type="checkbox", icon=None,
variable=self.addVariable("int", 0)),
"colorLig": self._addElemt(id=id + 3, name="Color Lig",
width=100, height=10,
action=self.toggleColor, type="checkbox", icon=None,
variable=self.addVariable("int", 0)),
"realtime": self._addElemt(id=id + 4, name="Real time", width=100, height=10,
action=self.setRealtime, type="checkbox", icon=None,
variable=self.addVariable("int", 0)),
}
id = id + len(self.CHECKBOXS)
self.setupLayout()
return True
def setupLayout(self):
self._layout = []
# setup
self._layout.append([self.LABEL_ID["rec"], self.TXT["rec"]])
self._layout.append([self.LABEL_ID["lig"], self.TXT["lig"]])
self._layout.append([self.LABEL_ID["score"], self.COMB_BOX["score"]])
self._layout.append([self.BTN["ok"], ])
# current scorer if any
self._layout.append([self.LABEL_ID["scorer"], self.COMB_BOX["scorer"]])
# option for the current score
for butk in list(self.CHECKBOXS.keys()):
self._layout.append([self.CHECKBOXS[butk], ])
self._layout.append([self.BTN["gScore"], ])
self._layout.append([self.BTN["cancel"], ])
def CreateLayout(self):
self._createLayout()
return True
def setRealtime(self, *args):
if hasattr(self.epmv.mv, 'energy'):
self.epmv.mv.energy.realTime = self.getBool(self.CHECKBOXS['realtime'])
def toggleDisplay(self, *args):
if hasattr(self.epmv.mv, 'energy'):
display = self.getBool(self.CHECKBOXS['displayLabel'])
if display:
if self.label is None:
self.initDisplay()
self.epmv._toggleDisplay(self.label, display)
if not hasattr(self.epmv.mv.energy, 'label'):
setattr(self.epmv.mv.energy, 'label', display)
else:
self.epmv.mv.energy.label = display
def toggleStore(self, *args):
store = self.getBool(self.CHECKBOXS['store'])
def toggleColor(self, *args):
if hasattr(self.epmv.mv, 'energy'):
r = self.getBool(self.CHECKBOXS['colorRec'])
l = self.getBool(self.CHECKBOXS['colorLig'])
if hasattr(self.epmv.mv, 'energy'):
if not hasattr(self.epmv.mv.energy, 'color'):
setattr(self.epmv.mv.energy, 'color', [r, l])
else:
self.epmv.mv.energy.color = [r, l]
def getScore(self, *args):
if hasattr(self.epmv.mv, 'energy'):
self.epmv.get_nrg_score(self.epmv.mv.energy)
def setRec(self):
pass
def setLig(self):
pass
def getScorerAvailable(self, *args):
if hasattr(self.epmv.mv, 'energy'):
return list(self.epmv.mv.energy.data.keys())
else:
return []
def setCurrentScorer(self, *args):
self.scoreravailable = self.getScorerAvailable()
if self.scoreravailable:
name = self.scoreravailable[self.getLong(self.COMB_BOX["scorer"])]
self.epmv.mv.energy.current_scorer = self.epmv.mv.energy.data[name]
def setScorer(self, *args):
self._scorer = self.scorertype[self.getLong(self.COMB_BOX["score"])]
def setupScoring(self, *args):
# get Rec
rname = self.getString(self.TXT['rec'])
# get Lig
print(rname)
lname = self.getString(self.TXT['lig'])
print(lname)
        # rname/lname can also be selection strings
# recSet=self.mv.select(rname,negate=False, only=True, xor=False,
# log=0, intersect=False)
recSet = self.epmv.mv.expandNodes(rname)
rec = recSet[0].top
# =self.epmv.mv.getMolFromName(rname)
ligSet = self.epmv.mv.expandNodes(lname)
lig = ligSet[0].top
# test lig and rec
scorer_name = rec.name + '-' + lig.name + '-' + self._scorer
if rec is not None and lig is not None:
if not hasattr(self.epmv.mv, 'energy'):
self.epmv.mv.energy = C.EnergyHandler(self.epmv.mv)
self.getScorerAvailable()
self.epmv.mv.energy.add(recSet, ligSet, score_type=self._scorer)
self.addItemToPMenu(self.COMB_BOX["scorer"], scorer_name)
            for mol in [rec, lig]:
                # add a new conformation and remember its index
                mol.allAtoms.addConformation(mol.allAtoms.coords)
                mol.cconformationIndex = len(mol.allAtoms[0]._coords) - 1
self.toggleDisplay()
self.toggleColor()
self.setRealtime()
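            # the new scorer appears in the pull-down menu under the name
            # '<rec>-<lig>-<scorer>', e.g. 'hsg1-ind-ad3Score' for the default
            # receptor/ligand strings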
def initDisplay(self):
scene = self.epmv.helper.getCurrentScene()
# label
self.label = label = self.epmv.helper.newEmpty("label")
self.epmv.helper.addObjectToScene(scene, label)
self.epmv.helper.constraintLookAt(label)
listeName = ["score", "el", "hb", "vw", "so"]
y = 0.0
self.listeO = []
for i, name in enumerate(listeName):
o = self.epmv.helper.Text(name, string=name + " : 0.00", pos=[0., y, 0.],
parent=label)
self.listeO.append(o)
y += 5.0
self.epmv.mv.energy.labels = self.listeO
# constrain the label to stay oriented toward the perspective camera
self.epmv.mv.energy.display = True
self.epmv.mv.energy.label = True
def cancel(self, *args):
self.close()
def Command(self, *args):
# print args
self._command(args)
return True
class Parameter_beadRibbons(uiadaptor):
def setup(self, epmv, id=None):
self.subdialog = True
self.block = True
self.title = "beadRibbons"
self.SetTitle(self.title)
self.epmv = epmv
width = 350
self.h = 250
self.w = 200
if id is not None:
id = id
else:
id = self.bid
# default value and parameters type
self.tapertypes = ["sin", "linear", "cos"]
self.paramstype = {'quality': {"type": "inputInt", "value": 12},
'taperLength': {"type": "inputInt", "value": 6},
'taperType': {"type": "pullMenu", "value": self.tapertypes},
'helixBeaded': {"type": "checkbox", "value": 1},
# 'helixCylinder':{"type":"checkbox","value":0},
'helixWidth': {"type": "inputFloat", "value": 1.6},
'helixThick': {"type": "checkbox", "value": 1},
'helixThickness': {"type": "inputFloat", "value": 0.20},
'helixBeadRadius': {"type": "inputFloat", "value": 0.32},
'helixColor1': {"type": "color", "value": (1, 1, 1)},
'helixColor2': {"type": "color", "value": (1, 0, 1)},
'helixBeadColor1': {"type": "color", "value": (1, 1, 1)},
'helixBeadColor2': {"type": "color", "value": (1, 1, 1)},
'helixSideColor': {"type": "color", "value": (1, 1, 1)},
'coilRadius': {"type": "inputFloat", "value": 0.1},
'coilColor': {"type": "color", "value": (1, 1, 1)},
'turnRadius': {"type": "inputFloat", "value": 0.1},
'turnColor': {"type": "color", "value": (0, 0, 1)},
'sheetBeaded': {"type": "checkbox", "value": 1},
'sheetWidth': {"type": "inputFloat", "value": 1.6},
'sheetBodyStartScale': {"type": "inputFloat", "value": 0.4},
'sheetThick': {"type": "checkbox", "value": 1},
'sheetThickness': {"type": "inputFloat", "value": 0.20},
'sheetBeadRadius': {"type": "inputFloat", "value": 0.32},
'sheetColor1': {"type": "color", "value": (1, 1, 0)},
'sheetColor2': {"type": "color", "value": (0, 1, 1)},
'sheetBeadColor1': {"type": "color", "value": (1, 1, 1)},
'sheetBeadColor2': {"type": "color", "value": (1, 1, 1)},
'sheetSideColor': {"type": "color", "value": (1, 1, 1)},
'sheetArrowhead': {"type": "checkbox", "value": 1},
'sheetArrowheadWidth': {"type": "inputFloat", "value": 2.0},
'sheetArrowHeadLength': {"type": "inputInt", "value": 8},
}
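# each key mirrors a Pmv beadedRibbons keyword: this dict drives both the
# widget creation below and the reset-to-default in restorePreferences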
# create the widget
# size order ???
self.PARAMS = {}
self.LABELS = {}
for key in self.paramstype:
self.PARAMS[key] = self._addElemt(name=key,
width=40, height=10,
action=self.SetPreferences, type=self.paramstype[key]["type"],
icon=None,
value=self.paramstype[key]["value"],
variable=self.addVar(self.paramstype[key]["type"],
self.paramstype[key]["value"]))
self.LABELS[key] = self._addElemt(label=key, width=100)
self.BTN = {}
self.BTN["ok"] = self._addElemt(name="Update Bead", action=self.SetPreferences, width=50,
type="button")
self.BTN["reset"] = self._addElemt(name="Reset to default", action=self.restorePreferences, width=50,
type="button")
self.BTN["close"] = self._addElemt(name="Close", action=self.close, width=50,
type="button")
self.setupLayout()
return True
def setupLayout(self):
# form layout for each SS type?
self._layout = []
ordered = list(self.PARAMS.keys())
ordered.sort()
# frame=[]
i = 0
if self.host != "blender24":
label = ["helix", "sheet", "coil", "turn", "general"]
frame = {}
elemframe = {}
for l in label:
elemframe[l] = []
for key in ordered:
found = False
for l in label:
if key.find(l) != -1:
elemframe[l].append([self.LABELS[key], self.PARAMS[key], ])
found = True
break
if not found:
elemframe["general"].append([self.LABELS[key], self.PARAMS[key], ])
for l in label:
frame = self._addLayout(name=l, elems=elemframe[l], collapse=True)
self._layout.append(frame)
else:
while i + 2 < len(ordered):  # three label/widget pairs per row
self._layout.append([self.LABELS[ordered[i]], self.PARAMS[ordered[i]],
self.LABELS[ordered[i + 1]], self.PARAMS[ordered[i + 1]],
self.LABELS[ordered[i + 2]], self.PARAMS[ordered[i + 2]]])
i = i + 3
# for key in ordered:
# self._layout.append([self.LABELS[key],self.PARAMS[key],])
# elemFrame=[]
# elemFrame.append([self.LOAD_BTN,self.LABEL_ID[0],self.LABEL_ID[1]])
# elemFrame.append([self.EDIT_TEXT,self.FETCH_BTN,self.COMB_BOX["pdbtype"]])
## elemFrame.append([self.LABEL_ID[2],self.COMB_BOX["mol"]])
#
# frame = self._addLayout(id=196,name="Get a Molecule",elems=elemFrame,collapse=False)
# self._layout.append(frame)
#
self._layout.append([self.BTN["reset"], ])
self._layout.append([self.BTN["ok"], self.BTN["close"]])
def CreateLayout(self):
self._createLayout()
# self.restorePreferences()
return True
def SetPreferences_cb(self, param):
if hasattr(self.epmv.gui, 'current_mol'):
mol = self.epmv.gui.current_mol
self.epmv.storeLastUsed(mol.name, "bead", param)
if self.epmv.uniq_ss:
self.epmv.mv.beadedRibbonsUniq(mol, redraw=0, createEvents=False, **param)
else:
self.epmv.mv.beadedRibbons(mol, redraw=0, createEvents=False, **param)
def SetPreferences(self, *args):
# get the value
# could use a general getCommand on the elem
param = {}
for key in self.PARAMS:
param[key] = self.getVal(self.PARAMS[key])
self.SetPreferences_cb(param)
def restorePreferences(self, *args):
for key in self.paramstype:
# print (key,self.PARAMS[key],self.paramstype[key]["value"])
try:
self.setVal(self.PARAMS[key], self.paramstype[key]["value"])
except:
print(("problem with ", key, self.paramstype[key]["value"]))
def Command(self, *args):
# print args
self._command(args)
return True
from Pmv.pmvPalettes import AtomElements
from Pmv.pmvPalettes import DavidGoodsell, DavidGoodsellSortedKeys
from Pmv.pmvPalettes import RasmolAmino, RasmolAminoSortedKeys
from Pmv.pmvPalettes import Shapely
from Pmv.pmvPalettes import SecondaryStructureType
from Pmv.colorPalette import ColorPaletteNG, ColorPaletteFunctionNG
class Parameter_pmvPalette(uiadaptor):
def setup(self, epmv, id=None):
self.subdialog = True
self.block = True
self.title = "pmv Palette"
self.SetTitle(self.title)
self.epmv = epmv
# self.witdh=200
self.w = 200
self.h = 300
from mglutil.util.defaultPalettes import MolColors, Rainbow, RainbowSortedKey
c = 'Color palette chain number'
self.epmv.mv.colorByChains.palette = ColorPaletteFunctionNG(
'MolColors', MolColors, readonly=0, info=c,
lookupFunction=lambda x, length=len(RainbowSortedKey): \
x.number % length, sortedkeys=RainbowSortedKey)
chainPalette = {}
chainPaletteDefault = {}
for i, label in enumerate(self.epmv.mv.colorByChains.palette.labels):
chainPaletteDefault[label] = chainPalette[label] = self.epmv.mv.colorByChains.palette.ramp[i]
if id is not None:
id = id
else:
id = self.bid
self.defaultColor = {
"atoms": AtomElements.copy(),
"atomsDG": DavidGoodsell.copy(),
"amino": self.epmv.RasmolAminocorrected.copy(),
"aminoS": Shapely.copy(),
"ss": SecondaryStructureType.copy(),
"chain": chainPaletteDefault
}
self.listPalettes = {
"atoms": ["atoms type", AtomElements],
"atomsDG": ["atoms polarity type", DavidGoodsell],
"amino": ["residue type", self.epmv.RasmolAminocorrected],
"aminoS": ["residue shape type", Shapely],
"ss": ["secondary structure", SecondaryStructureType],
"chain": ["chains", chainPalette]
}
self.group = {}
for key in self.listPalettes:
self.group[key] = {}
palette = self.listPalettes[key][1]
self.group[key]["title"] = self._addElemt(label=self.listPalettes[key][0], width=100)
self.group[key]["widget"] = {}
for name in palette:
pname = name
if key == "chain":
name = "ch" + name
widget = self._addElemt(name=name,
width=40, height=10,
action=self.SetColors, type="color",
icon=None,
value=palette[pname],
variable=self.addVar("color", palette[pname]))
label = self._addElemt(label=name, width=100)
self.group[key]["widget"][name] = [label, widget]
self.BTN = {}
self.BTN["ok"] = self._addElemt(name="Set Color", action=self.SetColors, width=50,
type="button")
self.BTN["reset"] = self._addElemt(name="Reset to default", action=self.restoreColors, width=50,
type="button")
self.BTN["close"] = self._addElemt(name="Close", action=self.close, width=50,
type="button")
self.setupLayout()
return True
def setupLayout(self):
# form layout for each SS type?
self._layout = []
ordered = ["atoms", "atomsDG", "amino", "aminoS", "ss", "chain"]
# if self.host != "blender":
frame = {}
elemframe = {}
for l in ordered:
elemframe[l] = []
for key in ordered:
for name in self.group[key]["widget"]:
wi = self.group[key]["widget"][name][1]
la = self.group[key]["widget"][name][0]
elemframe[key].append([wi, la, ])
frame = self._addLayout(name=self.listPalettes[key][0],
elems=elemframe[key], collapse=True, )
# type = "tab")
self._layout.append(frame)
# else:
# for key in ordered:
# title = self.group[key]["title"]
# self._layout.append([title,])
# for name in self.group[key]["widget"]:
# print key,name,name in self.group[key]["widget"]
# w = self.group[key]["widget"][name][1]
# l = self.group[key]["widget"][name][0]
# self._layout.append([l,w])
self._layout.append([self.BTN["reset"], ])
self._layout.append([self.BTN["ok"], self.BTN["close"]])
def CreateLayout(self):
self._createLayout()
if self.host != "maya" and self.host != "blender25":
# print "restore"
self.restorePreferences()
return True
def SetColors(self, *args):
# get the value
# could use a general getCommand on the elem
ordered = ["atoms", "atomsDG", "amino", "aminoS", "ss", "chain"]
fcolors = [self.epmv.mv.colorByAtomType,
self.epmv.mv.colorAtomsUsingDG,
self.epmv.mv.colorByResidueType,
self.epmv.mv.colorResiduesUsingShapely,
self.epmv.mv.colorBySecondaryStructure,
self.epmv.mv.colorByChains] # ,
# self.epmv.mv.color,
# self.epmv.mv.colorByProperty]
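# for each color scheme: read the widget colors back into the palette dict,
# then rebuild the Pmv command's palette object while preserving its
# lookupFunction/lookupMember so color lookups keep resolving correctly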
for i, key in enumerate(ordered):
palette = self.listPalettes[key][1]
for name in palette:
wname = name
if key == "chain":
wname = "ch" + name
w = self.group[key]["widget"][wname][1]
color = self.getVal(w)
self.listPalettes[key][1][name] = color
lookup_fn = None
lookup_member = fcolors[i].palette.lookupMember
colorClass = ColorPaletteNG
if hasattr(fcolors[i].palette, "lookupFunction"):
lookup_fn = fcolors[i].palette.lookupFunction
colorClass = ColorPaletteFunctionNG
fcolors[i].palette = colorClass(
self.listPalettes[key][0], self.listPalettes[key][1], readonly=0,
lookupFunction=lookup_fn)
else:
fcolors[i].palette = colorClass(
self.listPalettes[key][0], self.listPalettes[key][1], readonly=0,
lookupMember=lookup_member)
# need to update the palette attached to the MV commands
def restoreColors(self, *args):
for key in self.defaultColor:
palette = self.defaultColor[key]
for name in palette:
pname = name
if key == "chain":
name = "ch" + name
w = self.group[key]["widget"][name][1]
self.setVal(w, palette[pname])
self.listPalettes[key][1][pname] = palette[pname]  # use the unprefixed key for chain entries
def restorePreferences(self, *args):
for key in self.listPalettes:
# self.group[key] = {}
palette = self.listPalettes[key][1]
for name in palette:
pname = name
if key == "chain":
name = "ch" + name
if key in self.group and "widget" in self.group[key]:
w = self.group[key]["widget"][name][1]
self.setVal(w, palette[pname])
self.listPalettes[key][1][name] = palette[name]
def Command(self, *args):
# print args
self._command(args)
return True
class SavePanel(uiadaptor):
# saveDialog doesn't work
def setup(self, epmv, id=None):
self.subdialog = True
self.block = True
self.epmv = epmv
self.title = "Save Current Molecule" # +self.mol.name
self.SetTitle(self.title)
self.w = 250
self.h = 200
width = 350
if id is not None:
id = id
else:
id = self.bid
self.id = id
# define the widget here too
self.BTN = {}
# need a filename + browse button
self.filename = None
self._filename = self.addVariable("str", "")
self.FILE = self._addElemt(name="file", action=None, width=100,
value="", type="inputStr", variable=self._filename)
self.BTN["browse"] = self._addElemt(name="Browse", width=40, height=10,
action=self.browse, type="button") # self.buttonBrowse
# need a check box for transform + pull-down menu of possible transformation nodes, i.e. spline, bones...
self.BTN["transf"] = self._addElemt(name="Apply Transformation from ", width=120, height=10,
action=None, type="checkbox", icon=None,
variable=self.addVariable("int", 0))
self.sObject = ["cpk", "lines", "bones", "spline"]
self.COMB_BOX = self._addElemt(name="Object",
value=self.sObject,
width=60, height=10, action=None,
variable=self.addVariable("int", 0),
type="pullMenu", )
self.BTN["save"] = self._addElemt(name="Save", width=40, height=10,
action=self.save, type="button")
self.BTN["cancel"] = self._addElemt(name="Cancel", width=40, height=10,
action=self.close, type="button")
self.setupLayout()
def setupLayout(self):
# form layout for each SS type?
self._layout = []
self._layout.append([self.FILE, ]) # self.BTN["browse"]])
self._layout.append([self.BTN["transf"], self.COMB_BOX])
self._layout.append([self.BTN["save"], self.BTN["cancel"]])
def browse_cb(self, filename):
self.setVal(self.FILE, filename)
self.filename = filename
def browse(self, *args):
self.saveDialog(label="choose a file", callback=self.browse_cb)
def save(self, *args):
self.mol = self.epmv.gui.current_mol
# get the filename
filename = self.getVal(self.FILE)
if not filename:
filename = self.filename
# get if transform and the mode of transform
transform = self.getVal(self.BTN["transf"])
if transform:
mode = self.sObject[self.getLong(self.COMB_BOX)]
# do the transform
self.epmv.updateMolAtomCoord(self.mol, 0, types=mode)
self.epmv.mv.writePDB(self.mol, filename=filename)
self.drawMessage(message=self.mol.name + " saved to :\n" + filename)
self.close()
def CreateLayout(self):
self._createLayout()
# self.restorePreferences()
return True
def Command(self, *args):
# print args
self._command(args)
return True
class ApplyTransformationPanel(uiadaptor):
# saveDialog doesn't work
def setup(self, epmv, id=None):
self.subdialog = True
self.block = True
self.epmv = epmv
self.title = "Apply Coordinates" # +self.mol.name
self.SetTitle(self.title)
width = 350
self.h = 130
self.w = 300
if id is not None:
id = id
else:
id = self.bid
self.id = id
# define the widget here too
self.BTN = {}
# need a filename + browse button
self.BTN["transf"] = self._addElemt(label="From :",
width=40, height=10)
self.sObject = ["cpk", "lines", "bones", "spline"] # pointsClouds ?
self.COMB_BOX = self._addElemt(name="Object",
value=self.sObject,
width=60, height=10, action=None,
variable=self.addVariable("int", 0),
type="pullMenu", )
self.BTN["ok"] = self._addElemt(name="Ok", width=40, height=10,
action=self.transform, type="button")
self.BTN["cancel"] = self._addElemt(name="Cancel", width=40, height=10,
action=self.close, type="button")
self.LABEL = self._addElemt(label="Transform the actual PDB coordinates", width=100)
self.setupLayout()
def setupLayout(self):
# form layout for each SS type?
self._layout = []
self._layout.append([self.LABEL, ])
self._layout.append([self.BTN["transf"], self.COMB_BOX])
self._layout.append([self.BTN["ok"], self.BTN["cancel"]])
def transform(self, *args):
self.mol = self.epmv.gui.current_mol
# get the filename
mode = self.sObject[self.getLong(self.COMB_BOX)]
# do the transform
self.epmv.updateMolAtomCoord(self.mol, 0, types=mode)
self.drawMessage(message=self.mol.name + " transformed from " + mode + "!\n")
self.close()
def CreateLayout(self):
self._createLayout()
# self.restorePreferences()
return True
def Command(self, *args):
# print args
self._command(args)
return True
class Parameter_epmvGUI(uiadaptor):
# TODO : should use userpref of self.mv
# self.setUserPreference
def setup(self, epmv, id=None):
self.subdialog = True
self.title = "Preferences"
self.SetTitle(self.title)
self.epmv = epmv
# special blender 2.4
self.block = True
self.scrolling = False
elem_width = 180
self.h = 600
self.w = 250
if id is not None:
id = id
else:
id = self.bid
self.id = id
# need to split into epmv options and gui options -> guiClass?
self.EPMVOPTIONS = {}
self.LABELS = {}
self.ORDERS = {}
self.ORDERS["loading"] = {"label": "Loading a molecule:",
"elem": ["dsAtLoad", "center_mol", "forceFetch", "removeWater", "build_bonds",
"bonds_threshold", "bicyl", "force_pross", "join_ss",
"uniq_ss", "use_instances", "center_grid", "doCamera",
"doLight", ]}
self.ORDERS["interaction"] = {"label": "Interaction:",
"elem": ["use_progressBar", "updateColor", "synchro_realtime",
"synchro_timeline", "synchro_ratio"]}
self.ORDERS["extension"] = {"label": "Extensions (if installed):",
"elem": ["useModeller", "usePymol"]}
if self.host == "maya":
self.ORDERS["loading"]["elem"].append("spherestype")
if self.host == "c4d":
self.ORDERS["loading"]["elem"].append("ribcolor")
for key in self.epmv.keywords:
if self.epmv.keywords[key] is None:
continue
if "label" in self.epmv.keywords[key]:
self.LABELS[key] = self._addElemt(label=self.epmv.keywords[key]["label"],
width=80)
if key != "synchro_ratio" \
and key != "synchro_timeline" and key != "bonds_threshold" and key != "minmaxCMSgrid":
self.EPMVOPTIONS[key] = self._addElemt(
name=self.epmv.keywords[key]["name"],
width=elem_width, height=10,
action=None, type=self.epmv.keywords[key]["type"],
icon=None,
value=self.epmv.keywords[key]["value"],
variable=self.addVariable("int", self.epmv.keywords[key]["value"]))
if key == "bonds_threshold":
self.EPMVOPTIONS["bonds_threshold"] = self._addElemt(
name=self.epmv.keywords["bonds_threshold"]["name"],
width=elem_width, height=10,
action=None, type=self.epmv.keywords["bonds_threshold"]["type"],
icon=None,
value=self.epmv.keywords[key]["value"],
mini=self.epmv.keywords["bonds_threshold"]["mini"],
maxi=self.epmv.keywords["bonds_threshold"]["maxi"],
variable=self.addVariable("float", self.epmv.keywords[key]["value"]))
# special case of synchro_ratio
self.SRATIO = [[self._addElemt(
name=self.epmv.keywords["synchro_timeline"]["name"],
width=elem_width, height=10,
action=None, type=self.epmv.keywords["synchro_timeline"]["type"],
icon=None,
variable=self.addVariable("int", 0)), ],
[self._addElemt(
name=self.epmv.keywords["synchro_ratio"][0]["name"],
width=80, height=10,
action=None,
type=self.epmv.keywords["synchro_ratio"][0]["type"],
icon=None,
value=self.epmv.keywords["synchro_ratio"][0]["value"],
mini=self.epmv.keywords["synchro_ratio"][0]["mini"],
maxi=self.epmv.keywords["synchro_ratio"][0]["maxi"],
variable=self.addVariable("int", self.epmv.keywords["synchro_ratio"][0]["value"])),
self._addElemt(
label=self.epmv.keywords["synchro_ratio"][0]["name"],
width=120), ],
[self._addElemt(
name=self.epmv.keywords["synchro_ratio"][1]["name"],
width=80, height=10,
action=None,
type=self.epmv.keywords["synchro_ratio"][1]["type"],
icon=None,
value=self.epmv.keywords["synchro_ratio"][1]["value"],
mini=self.epmv.keywords["synchro_ratio"][1]["mini"],
maxi=self.epmv.keywords["synchro_ratio"][1]["maxi"],
variable=self.addVariable("int", self.epmv.keywords["synchro_ratio"][1]["value"])),
self._addElemt(
label=self.epmv.keywords["synchro_ratio"][1]["name"],
width=120)]]
self.BTN = self._addElemt(name="Apply and Close", action=self.SetPreferences, width=50,
type="button")
# slider preferences
self.setupLayout_frame()
# print self.host
if self.host != "maya" and self.host != "blender26" and self.host != "qt" and self.host != "3dsmax":
# print "restore"
self.restorePreferences()
return True
def setupLayout(self):
self._layout = []
k = list(self.EPMVOPTIONS.keys())
k.sort()
for key in k:
if "label" in self.epmv.keywords[key]:
self._layout.append([self.LABELS[key], self.EPMVOPTIONS[key], ])
else:
self._layout.append([self.EPMVOPTIONS[key], ])
self._layout.append(self.SRATIO[0])
self._layout.append(self.SRATIO[1])
self._layout.append(self.SRATIO[2])
self._layout.append([self.BTN, ])
def setupLayout_frame(self):
self._layout = []
for gr in self.ORDERS:
elem = []
k = self.ORDERS[gr]["elem"]
for key in k:
if key == "synchro_ratio":
elem.append(self.SRATIO[1])
elem.append(self.SRATIO[2])
elif key == "synchro_timeline":
elem.append(self.SRATIO[0])
else:
if "label" in self.epmv.keywords[key]:
elem.append([self.LABELS[key], self.EPMVOPTIONS[key], ])
else:
elem.append([self.EPMVOPTIONS[key], ])
frame = self._addLayout(id=196, name=self.ORDERS[gr]["label"], elems=elem, collapse=False) # ,type="tab")
self._layout.append(frame)
self._layout.append([self.BTN, ])
def CreateLayout(self):
self._createLayout()
# self.restorePreferences()
if self.host != "maya" and self.host != "blender26":
# print "restore"
self.restorePreferences()
return True
def SetPreferences(self, *args):
# print(args)
for key in self.EPMVOPTIONS:
# print(key)
if self.EPMVOPTIONS[key]["type"] == "pullMenu":
val = self.epmv.listeKeywords[key][self.getLong(self.EPMVOPTIONS[key])]
setattr(self.epmv, key, val)
else:
setattr(self.epmv, key, self.getVal(self.EPMVOPTIONS[key]))
self.epmv.synchro_timeline = self.getBool(self.SRATIO[0][0])
self.epmv.synchro_ratio[0] = self.getLong(self.SRATIO[1][0])
self.epmv.synchro_ratio[1] = self.getLong(self.SRATIO[2][0])
if self.epmv.useModeller and self.epmv._modeller:
# self.epmv.center_mol = False
# self.epmv.center_grid = False
if self.epmv.env is None:
from ePMV.extension.Modeller.pmvAction import setupENV
# setup Modeller
self.epmv.env = setupENV()
# if self.epmv.synchro_realtime:
self.epmv.synchronize()
# #self.AskClose()
# if self.epmv.gui._depthQ :
# self.epmv.helper.create_environment('depthQ',distance = 30.)
# else :
# obj=self.epmv.helper.getObject('depthQ')
# if obj is not None :
# self.epmv.helper.toggleDisplay(obj,False)
self.close()
def restorePreferences(self):
for key in self.EPMVOPTIONS:
if self.EPMVOPTIONS[key]["type"] == "pullMenu":
# print (key,self.epmv.listeKeywords[key],getattr(self.epmv,key))
val = self.epmv.listeKeywords[key].index(getattr(self.epmv, key))
self.setLong(self.EPMVOPTIONS[key], val)
else:
# print (key, self.EPMVOPTIONS[key], getattr(self.epmv, key))
self.setVal(self.EPMVOPTIONS[key], getattr(self.epmv, key))
self.setBool(self.SRATIO[0][0], self.epmv.synchro_timeline)
self.setLong(self.SRATIO[1][0], self.epmv.synchro_ratio[0])
self.setLong(self.SRATIO[2][0], self.epmv.synchro_ratio[1])
def Command(self, *args):
# print args
self._command(args)
return True
# should be called uiDialog, and uiSubDialog ?
class epmvGui(uiadaptor):
# TODO complete the command callback
#
restored = False
status = 0
link = 0
nF = 1000
__version__ = ePMV.__version__ # "0.5.59"
__about__ = "ePMV v" + __version__ + "\n"
__about__ += "uPy v" + upy.__version__ + "\n"
__about__ += """:
ePMV by Ludovic Autin,Graham Jonhson,Michel Sanner.
Develloped in the Molecular Graphics Laboratory directed by Arthur Olson.
The Scripps Research Insititute"""
__url__ = ["http://epmv.scripps.edu",
'https://upy.googlecode.com/svn/branches/updates/update_notes_all.json',
'http://epmv.scripps.edu/documentation/citations-informations', ]
host = ""
current_script = None
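# usage sketch (hypothetical host code, not part of this module):
#   gui = epmvGui()
#   gui.setup(epmv=None, host='c4d')  # restores a stored session or starts a new one
#   gui.CreateLayout()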
def setup(self, epmv=None, rep="epmv", mglroot="", host=''):
if not host:
if not self.host:
self.host = epmv.host
elif not self.host:
self.host = host
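# NOTE: assumes either host or epmv is supplied; epmv.host is read when
# both self.host and the host argument are empty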
# print "selfdict ",self.__dict__
# print dir(self)
self.restored = False
if epmv is None:
# try to restore
print("try to restore")
epmv = self._restore('mv', rep)
print("ok restore ", epmv)
if epmv is None:
print ("start ePMV")
epmv = ePMV.epmv_start(self.host, debug=0)
if mglroot:
epmv.mglroot = mglroot
else:
epmv.mglroot = ""
epmv.gui = self
epmv.initOption()
else:
self.restored = True
self._store('mv', {epmv.rep: epmv})
print ("epmv started")
self.epmv = epmv
self.mv = epmv.mv
self.helper = self.epmv.helper
self.funcColor = [self.mv.colorByAtomType,
self.mv.colorAtomsUsingDG,
self.mv.colorByResidueType,
self.mv.colorResiduesUsingShapely,
self.mv.colorBySecondaryStructure,
self.mv.colorByChains,
self.mv.colorByDomains,
self.mv.color,
self.mv.colorByProperty,
]
# print self.funcColor
self.colSchem = ['Atoms using CPK',
'AtomsDG (polarity/charge)',
'Per residue',
'Per residue shapely',
'Secondary Structure',
'Chains',
'Domains',
'Custom color',
'Rainbow from N to C',
'Temperature Factor',
'sas area',
]
# before creating the menu, check the extensions
# self.checkExtension()
# and the recent files
self.title = "ePMV"
self.y = 620
self.w = 160
self.h = 150
print ("epmv settitle")
self.SetTitle(self.title)
print ("epmv set materials")
self.epmv.setupMaterials()
print ("epmv check extension")
self.epmv.checkExtension()
self.initWidget()
self.setupLayout()
self.pymolgui = None
self.pym = None
self.register = None
self.current_mol = None
if self.host != 'qt': # self.host != 'blender25' and
self.checkRegistration()
# setupoption?
self.firstTime = {}
self.firstTime["ss"] = True
# self.check_update()
def setDefaults(self, *args):
# if self.host != 'blender25' : self.setBool(self.CHECKBOXS["sel"],1)
pass
def drawRegisterUI(self, *args):
if self.register is None:
self.register = Register_User_ePMV_ui()
self.register.setup()
self.drawSubDialog(self.register, 255555643)
def isRegistred(self, *args):
# from user import home  # doesn't work with Python 3
from Support.version import __version__
home = os.path.expanduser("~")
self.rc = home + os.sep + ".mgltools" + os.sep + __version__
if not self.rc:
return False
regfile = self.rc + os.sep + ".registration"
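# e.g. ~/.mgltools/<MGLTools version>/.registration on a default install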
if not os.path.exists(regfile):
return False
return True
def checkRegistration(self):
# after 3 use ask for registration or discard epmv
if not self.isRegistred():
self.register = Register_User_ePMV_ui()
self.register.setup()
self.drawSubDialog(self.register, 255555643) # ,asynchro = False)
if not self.isRegistred():
return False
return True
def CreateLayout(self):
self._createLayout()
if self.restored:
for mol in self.mv.Mols:
print(("restore ", mol.name))
self.addItemToPMenu(self.COMB_BOX["mol"], mol.name)
for dataname in self.mv.iMolData[mol.name]:
print(("restore dataname ", dataname))
self.addItemToPMenu(self.COMB_BOX["dat"], dataname)
self.current_traj = self.mv.iTraj[1]
# self.buttonLoad(None,mname=mol.name)
# self.firstmol = False
# #need to restore the data
# for dataname in self.mv.iMolData[mol.name] :
# print "dataname ",dataname
# self.buttonLoadData(None,trajname=dataname,molname=mol.name)
# #need to restore the selection
# if mol.name in self.mv.MolSelection.keys():
# self.add_Selection(n=mol.name)
# #for selname in self.mv.MolSelection[mol.name].keys() :
# self.addChildToMolMenu(selname)
self.restored = False
self.setDefaults()
return True
def Command(self, *args):
# print args
# if not self.register.registered :
# print ('Please Register')
# self.drawMessage(title='Register',message = 'Please Register')
# self.close()
self._command(args)
return True
def writeRecentFileXml(self):
# where ?
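# produces a minimal XML file of the form:
# <recentfile>
#     <file path="/path/to/molecule.pdb"/>
# </recentfile>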
f = open(ePMV.__path__[0] + os.sep + "recentfile.xml", "w")
from xml.dom.minidom import getDOMImplementation
impl = getDOMImplementation()
# what about afviewer
xmldoc = impl.createDocument(None, "recentfile", None)
root = xmldoc.documentElement
if "Documents" in self.mv.recentFiles.categories:
for i, r in enumerate(self.mv.recentFiles.categories["Documents"]):
if r[1] == "readMolecule":
filenode = xmldoc.createElement("file")
root.appendChild(filenode)
filenode.setAttribute("path", str(r[0]))
xmldoc.writexml(f, indent="\t", addindent="", newl="\n")
f.close()
def initWidget(self, id=None):
if id is not None:
id = id
else:
id = self.bid
self.id = id
self.iconsdir = self.epmv.mglroot + os.sep + "MGLToolsPckgs" + os.sep + "ePMV" + \
os.sep + "images" + os.sep + "icons" + os.sep
self.menuorder = ["File", "Edit", "Extensions", "Help"]
# submenu recentFile.
self.submenu = None
# if self.host == "3dsmax" : self.writeRecentFileXml()
if "Documents" in self.mv.recentFiles.categories:
self.submenu = {}
for i, r in enumerate(self.mv.recentFiles.categories["Documents"]):
if r[1] == "readMolecule":
self.submenu[str(self.id - 1)] = self._addElemt(name=r[0],
action=self.loadRecentFile)
# write the recentfile in xml
# self.apbsub = {}
# self.apbsub[str(self.id-1)]=self._addElemt(name="Compute Potential Using APBS",
# action=self.runAPBS)
self.hydrogen_sub = {}
self.hydrogen_sub[str(self.id - 1)] = self._addElemt(name="Add Hydrogens",
action=self.addHydrogen)
self.hydrogen_sub[str(self.id - 1)] = self._addElemt(name="Delete Hydrogens",
action=self.delHydrogen)
# print(("submenu",self.submenu))
self._menu = self.MENU_ID = {"File":
[self._addElemt(name="Recent Files", action=None, sub=self.submenu),
self._addElemt(name="Open PDB", action=self.browsePDB), # self.buttonLoad},
self._addElemt(name="Save PDB", action=self.savePDB),
self._addElemt(name="Open Data", action=self.browseDATA),
self._addElemt(name="Exit ePMV", action=self.close),
], # self.buttonLoadData
"Edit":
[self._addElemt(name="ePMV Preferences", action=self.drawPreferences),
self._addElemt(name="Colors palettes", action=self.drawPalette),
self._addElemt(name="Delete Water", action=self.delWater),
self._addElemt(name="Hydrogens", action=None, sub=self.hyrdogen_sub),
self._addElemt(name="Biological unit", action=self.drawBIOMT),
self._addElemt(name="Crystal cell", action=self.drawCrystal),
self._addElemt(name="Apply Transformation", action=self.applyTransf),
self._addElemt(name="Bind Geom to Molecule", action=self.bindGeom),
self._addElemt(name="Join SS geometry", action=self.joinSS),
],
# "Compute" :
# [self._addElemt(name="Electrostatics",action=None,sub=self.apbsub),
# ],
# self.drawPreferences}],
# [{"id": id+3, "name":"Camera&c&","action":self.optionCam},
# {"id": id+4, "name":"Light&c&","action":self.optionLight},
# {"id": id+5, "name":"CloudsPoints&c&","action":self.optionPC},
# {"id": id+6, "name":"BiCylinders&c&","action":self.optionCyl}],
"Extensions": [
self._addElemt(name="APBS Electrostatics", action=self.drawAPBS),
self._addElemt(name="PyAutoDock", action=self.drawPyAutoDock),
self._addElemt(name="BuildDNA(w3DNA)", action=self.drawBuildDNA),
],
"Help":
[self._addElemt(name="About ePMV", action=self.drawAbout), # self.drawAbout},
self._addElemt(name="ePMV documentation", action=self.launchBrowser),
# self.launchBrowser},
self._addElemt(name="Check for stable updates", action=self.stdCheckUpdate),
self._addElemt(name="Check for latest development updates",
action=self.devCheckUpdate),
self._addElemt(name="Citation Informations", action=self.citationInformation),
# self.citationInformation},
],
}
if self.epmv._AF:
self.MENU_ID["Extensions"].append(self._addElemt(name="AutoFill",
action=None)) # self.launchAFgui})
if self.epmv._AR:
self.MENU_ID["Extensions"].append(self._addElemt(name="ARViewer",
action=None)) # self.launchARgui})
if self.epmv._modeller:
self.MENU_ID["Extensions"].append(self._addElemt(name="Modeller",
action=self.drawModellerGUI)) # self.modellerGUI})
if self.epmv._pymol:
self.MENU_ID["Extensions"].append(self._addElemt(name="Pymol",
action=self.drawPymolGUI)) # self.modellerGUI})
if self.epmv._prody:
self.MENU_ID["Extensions"].append(self._addElemt(name="ProdyNMA",
action=self.drawProdyGUI)) # self.modellerGUI})
self.MENU_ID["Extensions"].append(self._addElemt(name="Add an Extension",
action=self.addExtensionGUI)) # self.addExtensionGUI})
if not self.isRegistred():
self.MENU_ID["Help"].append(self._addElemt(name="Register",
action=self.drawRegisterUI))
if self.epmv.soft == "blender25":
self.setupMenu()
self.LABEL_ID = []
self.LABEL_ID.append(self._addElemt(label="to a PDB file OR enter a 4 digit ID (e.g. 1crn):",
width=120))
self.LABEL_ID.append(self._addElemt(label="", width=1))
self.LABEL_ID.append(self._addElemt(label="Current selection :", width=50))
self.LABEL_ID.append(self._addElemt(label="Add selection set using string or", width=120))
self.LABEL_ID.append(self._addElemt(label="Scheme:", width=50))
self.LABEL_ID.append(self._addElemt(label="to load a Data file", width=50))
self.LABEL_ID.append(self._addElemt(label="to Current Selection and play below:", width=120))
self.LABEL_ID.append(self._addElemt(label="PMV-Python scripts/commands", width=50))
self.LABEL_ID.append(self._addElemt(label="Molecular Representations", width=50))
self.LABEL_ID.append(self._addElemt(label="Apply", width=20))
self.LABEL_ID.append(self._addElemt(label="a Selection Set", width=50))
self.LABEL_ID.append(self._addElemt(label="atoms in the Selection Set", width=120))
self.LABEL_ID.append(self._addElemt(label="or", width=10))
self.LABEL_ID.append(self._addElemt(label="or", width=10))
self.LABEL_ID.append(self._addElemt(label=":", width=10))
self.LABEL_ID.append(self._addElemt(label="Or choose a custom color", width=100))
self.LABEL = {}
self.LABEL["uv1"] = self._addElemt(label="Create Texture Mapping for", width=100)
self.LABEL["uv2"] = self._addElemt(label="Using", width=100)
self.LABEL["molstat"] = self._addElemt(label="mol is c chains r residue a atoms", width=100)
self.LABEL["datastat"] = self._addElemt(label="data min: max:", width=100)
self.LABEL_VERSION = self._addElemt(label='welcome to ePMV ' + self.__version__, width=100)
self.pdbid = self.addVariable("str", "1crn")
self.EDIT_TEXT = self._addElemt(name="pdbId", action=None, width=100,
value="1crn", type="inputStr", variable=self.pdbid)
self.LOAD_BTN = self._addElemt(name="Browse", width=40, height=10,
action=self.browsePDB, type="button") # self.buttonBrowse
self.SAVE_BTN = self._addElemt(name="Save", width=40, height=10,
action=self.savePDB, type="button") # self.buttonBrowse
self.FETCH_BTN = self._addElemt(name="Fetch", width=40, height=10,
action=self.fetchPDB, type="button") # self.buttonLoad}
self.DATA_BTN = self._addElemt(name="Browse", width=40, height=10,
action=self.browseDATA, type="button") # self.buttonLoadData}
self.PMV_BTN = self._addElemt(name="Exec", width=80, height=10,
action=self.execPmvComds, type="button") # self.execPmvComds}
# self.KEY_BTN= {"id":id,"name":"store key-frame",'width':80,"height":10,
# "action":None}
#
# self.DEL_BTN= self._addElemt(id=id,name="Delete",width=80,height=10,
# action=self.deleteMol,type="button")
#
# print "id del button", id-1
self.BTN = {}
self.BTN["uv"] = self._addElemt(name="Create", width=80, height=10,
action=self.createTexture, type="button")
self.BTN["bead"] = self._addElemt(name="Options", width=80, height=10,
action=self.drawBeadOption, type="button")
self.BTN["cgeom"] = self._addElemt(name="Add custom Geom", width=100, height=10,
action=self.bindGeom, type="button")
# values and variable definition
self.datatype = ['e.g.', 'Trajectories:', ' .trj', ' .xtc', 'VolumeGrids:']
DataSupported = r'\.mrc$|\.MRC$|\.cns$|\.xplo*r*$|\.ccp4*$|\.grd$|\.fld$|\.map$|\.omap$|\.brix$|\.dsn6$|\.dn6$|\.rawiv$|\.d*e*l*phi$|\.uhbd$|\.dx$|\.spi$'  # raw string avoids invalid-escape warnings
DataSupported = DataSupported.replace("\\", " ").replace("$", "").split("|")
self.datatype.extend(DataSupported)
self.presettype = ['available presets:', ' Lines', ' Liccorice', ' SpaceFilling',
' Ball+Sticks', ' RibbonProtein+StickLigand',
' RibbonProtein+CPKligand', ' Custom',
' Save Custom As...'] # ' xray',
self._preset = self.addVariable("int", 1)
self.keyword = ['keywords:', ' backbone', ' sidechain', ' chain', ' picked']
from MolKit.protein import ResidueSetSelector
kw = [" " + x for x in list(ResidueSetSelector.residueList.keys())]
self.keyword.extend(kw)
self._keyword = self.addVariable("int", 1)
self.scriptliste = ['Open:',
'pymol_demo',
'interactive_docking',
'surface_per_chain',
'colorbyAPBS',
'demo1',
'user_script']
self.scriptsave = ['Save', 'Save as']
self._eOscript = self.addVariable("int", 1)
self._eSscript = self.addVariable("int", 1)
self.editselection = ['Save set', 'Rename set', 'Delete set']
self._eSelection = self.addVariable("int", 1)
self.pdbtype = ['PDB', 'TMPDB', 'OPM', 'CIF', 'PQS']
self._pdbtype = self.addVariable("int", 1)
self.currentmolvar = self.addVariable("int", 1)
self.colvar = self.addVariable("int", 1)
self.datvar = self.addVariable("int", 1)
self.boneslevel = ["Trace", "Backbone", "Full Atoms", "Domain", "Chain", "Mol", "Selection"]
self._bonesLevel = self.addVariable("int", 1)
self.uvselection = ["unwrapped mesh UV", "regular disposed triangle"]
self._uvselection = self.addVariable("int", 1)
self._customgeom = self.addVariable("int", 1)
self.customgeom = ["None", ]
self.COMB_BOX = {"mol": self._addElemt(name="CurrentMol", value=[],
width=60, height=10, action=self.setCurMol,
variable=self.currentmolvar,
type="pullMenu"), # self.setCurMol
"col": self._addElemt(name="Color",
value=self.colSchem,
width=80, height=10, action=self.color,
variable=self.colvar,
type="pullMenu", ), # self.color},
"dat": self._addElemt(name="Data", value=["None"],
width=60, height=10, action=self.updateTraj,
variable=self.datvar,
type="pullMenu", ), # self.updateTraj},
"pdbtype": self._addElemt(name="Fetch",
value=self.pdbtype,
width=50, height=10, action=None,
variable=self._pdbtype,
type="pullMenu", ),
"datatype": self._addElemt(name="DataTypes",
value=self.datatype,
width=20, height=10, action=None,
variable=self.addVariable("int", 1),
type="pullMenu", ),
"preset": self._addElemt(name="Preset",
value=self.presettype,
width=100, height=10, action=self.drawPreset,
variable=self._preset,
type="pullMenu", ), # self.drawPreset},
"keyword": self._addElemt(name="Keyword",
value=self.keyword,
width=80, height=10, action=self.setKeywordSel,
variable=self._keyword,
type="pullMenu", ), # self.setKeywordSel},
"scriptO": self._addElemt(name="ScriptO",
value=self.scriptliste,
width=80, height=10, action=self.set_ePMVScript,
variable=self._eOscript,
type="pullMenu", ), # self.set_ePMVScript},
"scriptS": self._addElemt(name="ScriptS",
value=self.scriptsave,
width=80, height=10, action=self.save_ePMVScript,
variable=self._eSscript,
type="pullMenu", ), # self.save_ePMVScript},
"selection": self._addElemt(name="Selection",
value=self.editselection,
width=80, height=10, action=self.edit_Selection,
variable=self._eSelection,
type="pullMenu", ), # elf.edit_Selection},
"bones": self._addElemt(name="Bones Level",
value=self.boneslevel,
width=80, height=10, action=None,
variable=self._bonesLevel,
type="pullMenu", ),
"uv": self._addElemt(name="Mapping:",
value=self.uvselection,
width=80, height=10, action=None,
variable=self._uvselection,
type="pullMenu", ),
"cgeom": self._addElemt(name="Customs Geom:",
value=self.customgeom,
width=80, height=10, action=self.updateCGeomDisplay,
variable=self._customgeom,
type="pullMenu", ),
}
deflt = "/Users/Shared/uv.png"
self.uvid = self.addVariable("str", deflt)
self.INPUTSTR = {}
self.INPUTSTR["uv"] = self._addElemt(name="image filename", action=None, width=120,
value=deflt, type="inputStr", variable=self.uvid)
self.INPUTSTR["uvg"] = self._addElemt(name="geom", action=None, width=120,
value="MSMSMOL", type="inputStr", variable=self.uvid)
deflt = '(Mol:Ch:Rletter:Atom), eg "1CRN:A:ALA:CA", \
or keywords: BACKBONE, SIDECHAINS, etc...'
self.selid = self.addVariable("str", deflt)
self.SELEDIT_TEXT = self._addElemt(name="selection", action=self.updateSelection, width=120,
value=deflt, type="inputStr", variable=self.selid)
self.SEL_BTN = {"add": self._addElemt(name="Save set", width=45, height=10,
action=None, type="button"), # self.add_Selection},
"rename": self._addElemt(name="Rename", width=45, height=10,
action=None, type="button"), # self.rename_Selection},
"deleteS": self._addElemt(name="Delete Set", width=45, height=10,
action=None, type="button"), # self.delete_Selection},
"deleteA": self._addElemt(name="Delete", width=45, height=10,
action=self.delete_Atom_Selection, type="button"),
# self.delete_Atom_Selection}
}
# do we need check button for other representation? ie lineMesh,cloudMesh etc..
self.CHECKBOXS = {"cpk": self._addElemt(name="Atoms", width=80, height=10,
action=self.dsCPK, type="checkbox", icon=None,
variable=self.addVariable("int", 0)), # self.displayCPK},
"bs": self._addElemt(name="Sticks", width=80, height=10,
action=self.dsBS, type="checkbox", icon=None,
variable=self.addVariable("int", 0)), # self.displayBS},
"ss": self._addElemt(name="Ribbons", width=80, height=10,
action=self.dsSS, type="checkbox", icon=None,
variable=self.addVariable("int", 0)), # self.displaySS},
"bead": self._addElemt(name="BeadedRibbons", width=80, height=10,
action=self.dsBR, type="checkbox", icon=None,
variable=self.addVariable("int", 0)), # self.displaySS},
"loft": self._addElemt(name="Worm", width=80, height=10,
action=self.dsLoft, type="checkbox", icon=None,
variable=self.addVariable("int", 0)), # self.createLoft},
"arm": self._addElemt(name="Armature", width=80, height=10,
action=self.dsBones, type="checkbox", icon=None,
variable=self.addVariable("int", 0)), # self.createArmature},
"spline": self._addElemt(name="Spline", width=80, height=10,
action=self.dsSpline, type="checkbox", icon=None,
variable=self.addVariable("int", 0)), # self.createSpline},
"surf": self._addElemt(name="MSMSurf", width=80, height=10,
action=self.dsMSMS, type="checkbox", icon=None,
variable=self.addVariable("int", 0)), # self.displaySurf},
"cms": self._addElemt(name="CoarseMolSurf", width=80, height=10,
action=self.dsCMS, type="checkbox", icon=None,
variable=self.addVariable("int", 0)), # self.displayCoarseMS},
"meta": self._addElemt(name="Metaballs", width=80, height=10,
action=self.dsMeta, type="checkbox", icon=None,
variable=self.addVariable("int", 0)),
"cgeom": self._addElemt(name="Toggle Display", width=80, height=10,
action=self.dsCustomGeom, type="checkbox", icon=None,
variable=self.addVariable("int", 0)),
}
# if self.host != "maya":
self.CHECKBOXS["points"] = self._addElemt(name="PointClouds", width=80, height=10,
action=self.dsPoints, type="checkbox", icon=None,
variable=self.addVariable("int", 0)) # self.displayMetaB}
self.CHECKBOXS["lines"] = self._addElemt(name="Lines", width=80, height=10,
action=self.dsLines, type="checkbox", icon=None,
variable=self.addVariable("int", 0)) # self.displayMetaB}
self.TOGGLESEL = self._addElemt(name="Show", width=80, height=10,
action=self.toggleSelection, type="checkbox", icon=None,
variable=self.addVariable("int", 1))
# need a variable for each one
# no label for these
# do we need slider button for other representation? ie metaball?/loft etc..
a = "hfit_scale"
self.SLIDERS = {"cpk": self._addElemt(name="cpk_scale", width=80, height=10,
action=self.dsCPK, type="sliders", label="scale",
variable=self.addVariable("float", 1.0),
mini=0.01, maxi=5., step=0.01, alignement=a), # self.displayCPK},
"bs_s": self._addElemt(name="bs_scale", width=80, height=10,
action=self.dsBS, type="sliders", label="scale",
variable=self.addVariable("float", 1.0),
mini=0.0, maxi=10., step=0.01, alignement=a), # self.displayBS},
"bs_r": self._addElemt(name="bs_ratio", width=80, height=10,
action=self.dsBS, type="sliders", label="ratio",
variable=self.addVariable("float", 1.5),
mini=0.0, maxi=10., step=0.01, alignement=a), # self.displayBS},
"surf": self._addElemt(name="probe", width=80, height=10,
action=self.updateMSMS, type="sliders", label="probe radius",
variable=self.addVariable("float", 1.4),
mini=0.001, maxi=10., step=0.01, alignement=a), # self.updateSurf},
"surfdensity": self._addElemt(name="density", width=80, height=10,
action=self.updateMSMS, type="sliders", label="triangle density",
variable=self.addVariable("float", 3.0),
mini=1.0, maxi=10., step=0.01, alignement=a), # self.updateSurf},
"cmsI": self._addElemt(name="isovalue", width=80, height=10,
action=self.updateCMS, type="sliders", label="isovalue",
variable=self.addVariable("float", 7.1),
mini=0.01, maxi=10., step=0.01, alignement=a), # self.updateCMS},
"cmsR": self._addElemt(name="resolution", width=80, height=10,
action=self.updateCMS, type="sliders", label="resolution",
variable=self.addVariable("float", -0.3),
mini=-5.0, maxi=-0.0001, step=0.001, precision=4, alignement=a),
# self.updateCoarseMS},
"cmsG": self._addElemt(name="grid size", width=80, height=10,
action=self.updateCMS, type="sliders", label="grid size",
variable=self.addVariable("int", 32),
mini=2, maxi=100, step=1, alignement=a),
# self.updateCoarseMS},
# "meta":{"id":id+8,"name":"MBalls",'width':15,"height":10,"action":self.displayMetaB}
"datS": self._addElemt(name="state", width=100, height=10,
action=self.applyState, type="sliders", label="step/value",
variable=self.addVariable("float", 0.),
mini=-10., maxi=10., step=0.01, alignement=a), # self.applyState},
# "datV":{"id":id+7,"name":"value",'width':15,"height":10,"action":self.applyValue},
}
# slider labels
a = "hleft"
self.SLIDERS_LABEL = {}
for key in self.SLIDERS:
if self.SLIDERS[key]["label"]:
self.SLIDERS_LABEL[key] = self._addElemt(
label=self.SLIDERS[key]["label"],
width=50, alignement=a)
self.COLFIELD = self._addElemt(name="chooseCol", action=self.color,
variable=self.addVariable("col", (1., 1., 0.)),
value=(1., 1., 0.),
type="color", width=30, height=15) # self.chooseCol}
txt = "\n\nprint 'put your own commands here'\nprint 'with self = PMV instance, and epmv as ePMV'\n"
self._script = self.addVariable("str", txt)
self.SCRIPT_TEXT = self._addElemt(name="epmvScript", action=None, width=200,
value=txt, type="inputStrArea", variable=self._script, height=200)
bannerfile = self.epmv.mglroot + os.sep + "MGLToolsPckgs" + os.sep + "ePMV" + \
os.sep + "images" + os.sep + "banner.jpg"
self.BANNER = self._addElemt(name="banner", value=bannerfile, type="image")
# options panels and subwindows
self.pd = ParameterModeller(title="modeller")
self.pd.setup(self.epmv)
self.options = Parameter_epmvGUI()
self.options.setup(self.epmv)
self.ad = ParameterScoringGui()
self.ad.setup(self.epmv)
self.beadUi = Parameter_beadRibbons()
self.beadUi.setup(self.epmv)
self.apbsgui = APBSgui(title="APBS")
self.apbsgui.setup(epmv=self.epmv)
self.pmvPalgui = Parameter_pmvPalette()
self.pmvPalgui.setup(epmv=self.epmv)
self.saveGui = SavePanel()
self.saveGui.setup(epmv=self.epmv)
self.applyPanel = ApplyTransformationPanel()
self.applyPanel.setup(epmv=self.epmv)
self.bindPanel = BindGeomToMol()
self.bindPanel.setup(epmv=self.epmv)
self.dnaPanel = BuildDNAGui()
self.dnaPanel.setup(epmv=self.epmv)
if self.epmv._prody:
from ePMV.extension._prody import prodyGui
self.prodygui = prodyGui.prodyGui()
self.prodygui.setup(epmv=self.epmv)
# TODO
# self.argui = ParameterARViewer()
# self.argui.setup(self.epmv)
def setupLayout(self):
# epmv layout:
# first is the Menu / last as in blender self.MENU_ID did by the adaptor
# then is the pdb browse/fetch buttons
# Load Molecule
# line1
# need to reset the layout for restore purpose
# collapse=False
self._layout = []
elemFrame = []
elemFrame.append([self.LOAD_BTN, self.LABEL_ID[0], self.LABEL_ID[1]])
elemFrame.append([self.EDIT_TEXT, self.FETCH_BTN, self.COMB_BOX["pdbtype"]])
frame = self._addLayout(id=196, name="Get a Molecule", elems=elemFrame, collapse=False)
self._layout.append(frame)
# DashBoard / Selection Display Options
elemFrame = []
elemFrame.append([self.LABEL_ID[2], self.COMB_BOX["mol"], self.SAVE_BTN, self.TOGGLESEL])
elemFrame.append([self.LABEL["molstat"], ])
elemFrame.append([self.LABEL_ID[3], self.COMB_BOX["keyword"], ])
elemFrame.append([self.SELEDIT_TEXT, self.COMB_BOX["selection"]])
elemFrame.append([self.SEL_BTN["deleteA"], self.LABEL_ID[11]])
frame = self._addLayout(id=197, name="Selections", elems=elemFrame, collapse=False)
self._layout.append(frame)
elemFrame = []
elemFrame.append([self.LABEL_ID[8], self.COMB_BOX["preset"]])
frame = self._addLayout(id=198, name="Preset Representations", elems=elemFrame)
# self._layout.append(frame)
# merge CPK bs line and point in one frame
elemFrame = []
elemFrame.append([self.CHECKBOXS["cpk"], ])
elemFrame.append([self.SLIDERS_LABEL["cpk"],
self.SLIDERS["cpk"], ])
elemFrame.append([self.CHECKBOXS["bs"], ])
elemFrame.append([self.SLIDERS_LABEL["bs_s"], self.SLIDERS["bs_s"], ])
elemFrame.append([self.SLIDERS_LABEL["bs_r"], self.SLIDERS["bs_r"], ])
# if self.host != "maya":
elemFrame.append([self.CHECKBOXS["points"], ])
elemFrame.append([self.CHECKBOXS["lines"], ])
frame = self._addLayout(id=200, name="Atom/Bond Representations", elems=elemFrame, collapse=False)
self._layout.append(frame)
elemFrame = []
elemFrame.append([self.CHECKBOXS["ss"], ])
elemFrame.append([self.CHECKBOXS["bead"], self.BTN["bead"]])
elemFrame.append([self.CHECKBOXS["arm"], self.COMB_BOX["bones"]])
elemFrame.append([self.CHECKBOXS["loft"], ])
elemFrame.append([self.CHECKBOXS["spline"], ])
frame = self._addLayout(id=202, name="Backbone Representations", elems=elemFrame)
self._layout.append(frame)
elemFrame = []
elemFrame.append([self.CHECKBOXS["surf"], ])
elemFrame.append([self.SLIDERS_LABEL["surf"], self.SLIDERS["surf"], ])
elemFrame.append([self.SLIDERS_LABEL["surfdensity"], self.SLIDERS["surfdensity"], ])
elemFrame.append([self.CHECKBOXS["cms"], ])
elemFrame.append([self.SLIDERS_LABEL["cmsI"], self.SLIDERS["cmsI"], ])
elemFrame.append([self.SLIDERS_LABEL["cmsR"], self.SLIDERS["cmsR"], ])
elemFrame.append([self.SLIDERS_LABEL["cmsG"], self.SLIDERS["cmsG"], ])
elemFrame.append([self.CHECKBOXS["meta"], ])
frame = self._addLayout(id=203, name="Surface Representations", elems=elemFrame)
self._layout.append(frame)
# line9#color what is check as display
elemFrame = []
# elemFrame.append([self.LABEL_ID[16],])
elemFrame.append([self.COMB_BOX["cgeom"], self.CHECKBOXS["cgeom"], ])
elemFrame.append([self.BTN["cgeom"], ])
frame = self._addLayout(id=208, name="Custom Representations", elems=elemFrame)
self._layout.append(frame)
# line9#color what is check as display
elemFrame = []
elemFrame.append([self.LABEL_ID[4], self.COMB_BOX["col"], ])
elemFrame.append([self.LABEL_ID[15], self.COLFIELD])
frame = self._addLayout(id=204, name="Color By", elems=elemFrame)
self._layout.append(frame)
# #lineUV -> will go in the options menu, as vertex colors to UV texture mapping (slow)
# elemFrame=[]
# elemFrame.append([self.LABEL["uv1"],self.INPUTSTR["uvg"]])#combo box OR Input
# elemFrame.append([self.LABEL["uv2"],self.COMB_BOX["uv"]])
# elemFrame.append([self.BTN["uv"],self.INPUTSTR["uv"] ])
# frame = self._addLayout(id=205,name="UV Texture mapping",elems=elemFrame)
# self._layout.append(frame)
# self._layout.append([self.LABEL_ID[4],self.COMB_BOX["col"],self.COLFIELD])
# line10#data player
elemFrame = []
elemFrame.append([self.DATA_BTN, self.LABEL_ID[5], self.COMB_BOX["datatype"]])
elemFrame.append([self.LABEL_ID[9], self.COMB_BOX["dat"], self.LABEL_ID[6]])
elemFrame.append([self.LABEL["datastat"], ])
elemFrame.append([self.SLIDERS_LABEL["datS"], self.SLIDERS["datS"]])
frame = self._addLayout(id=206, name="Data Player", elems=elemFrame)
self._layout.append(frame)
elemFrame = []
elemFrame.append([self.COMB_BOX["scriptO"], self.COMB_BOX["scriptS"]])
elemFrame.append([self.SCRIPT_TEXT, ])
elemFrame.append([self.PMV_BTN, ])
frame = self._addLayout(id=207, name=self.LABEL_ID[7]["label"], elems=elemFrame)
self._layout.append(frame)
# Version
self._layout.append([self.LABEL_VERSION, ])
# Banner if we can
self._layout.append([self.BANNER, ])
def getGeomActive(self, name):
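# return the Pmv geometry names for every checked representation, e.g.
# 'balls'/'sticks', 'secondarystructure' (or per-chain 'SS<chain id>'),
# 'CoarseMS_<mol>', 'MSMS-MOL<mol>', or 'b_<geom>' for bound custom geometry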
lgeomName = []
mname, mol, sel, selection = self.getDsInfo()
# lookup moldisp
for key in self.CHECKBOXS:
if self.getBool(self.CHECKBOXS[key]):
if key == "bs":
lgeomName.append('balls')
lgeomName.append('sticks')
elif key == "ss":
if not self.epmv.uniq_ss:
lgeomName.append('secondarystructure')
else:
for ch in mol.chains:
name = "SS%s" % (ch.id)
lgeomName.append(name)
elif key == "cms":
sname = 'CoarseMS_' + str(mname)
lgeomName.append(sname)
elif key == "surf":
sname = 'MSMS-MOL' + str(mname)
# if sel != mname :
# sname='MSMS-MOL'+str(sel)
lgeomName.append(sname)
elif key == "cgeom":
# need to get the current one
g = self.customgeom[self.getLong(self.COMB_BOX["cgeom"])]
lgeomName.append("b_" + g)
elif key == "col":
continue
else:
lgeomName.append(key)
return lgeomName
def getData(self, molname, adt=False):
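# register a freshly read molecule: assign atom radii, add it to the scene
# and the molecule pull-down, refresh the stats label, and auto-load model
# data when several coordinate sets exist (unless an xtc trajectory is attached)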
# if molname in self.mv.Mols.name : self.mv.hostApp.driver.duplicatemol=False
self.mv.assignAtomsRadii(str(molname), united=0, log=0, overwrite=1)
self.epmv._addMolecule(molname)
# add a child to the mol pop-up menu, i.e. the current-selection pull-down menu
self.addItemToPMenu(self.COMB_BOX["mol"], str(molname))
self.setVal(self.COMB_BOX["mol"], str(molname))
mol = self.mv.getMolFromName(molname)
self.setVal(self.LABEL["molstat"], "%s %d chains %d residues %d atoms" % (
mol.name, len(mol.chains), len(mol.chains.residues), len(mol.allAtoms)))
self.current_mol = mol
if len(mol.allAtoms[0]._coords) > 1 or self.epmv.useModeller:
# need a test about trajectories...
doit = True
if len(self.mv.iMolData[mol.name]) != 0: # ok data
for dataname in self.mv.iMolData[mol.name]:
if dataname.find('xtc') != -1:
doit = False
if doit: self.loadDATA(None, model=True, molname=molname, adt=adt)
def startingRepresentation(self):
# ["secondary structure","clouds","line","None"]
rep = self.epmv.dsAtLoad
print ("startingRepresentation", rep)
if rep == "secondary structure":
# try :
self.setBool(self.CHECKBOXS["ss"], True)
self.dsSS(None)
# except:
# pass
if rep == "beadRibbon":
try:
self.setBool(self.CHECKBOXS["bead"], True)
self.dsBR(None)
except:
pass
elif rep == "clouds":
try:
self.setBool(self.CHECKBOXS["points"], True)
self.dsPoints(None)
except:
pass
elif rep == "line":
try:
self.setBool(self.CHECKBOXS["lines"], True)
self.dsLines(None)
except:
pass
elif rep == "MSMS":
try:
self.setBool(self.CHECKBOXS["surf"], True)
self.dsMSMS(None)
except:
pass
elif rep == "CMS":
try:
self.setBool(self.CHECKBOXS["cms"], True)
self.dsCMS(None)
except:
pass
else:
pass
self.updateViewer()
def loadPDB(self, filename):
if not filename: return
molname = os.path.splitext(os.path.basename(filename))[0]
name = filename
adt = False
ext = os.path.splitext(os.path.basename(name))[1]
# if the extension matches a supported data type, hand off to loadDATA
for ty in self.datatype:
if ext and ty.find(ext) != -1:
self.loadDATA(filename)
return
# print "loadPDB",molname
# test the name lenght
if len(molname) > self.epmv.MAX_LENGTH_NAME:
try:
self.drawError(
"Sorry, but the name of the given file is to long,\nand not suppported.\n Please rename it or load another file")
except:
print (
"Sorry, but the name of the given file is to long,\nand not suppported.\n Please rename it or load another file")
return 0
# if VERBOSE :print molname, self.Mols.name, (molname in self.Mols.name)
# print ext
if ext == '.dlg': # Autodock
self.epmv.center_mol = False
self.mv.readDLG(name, 0, 0) # addToPrevious,ask
print ("read name ", self.mv.Mols)
self.mv.showDLGstates(self.mv.Mols[-1])
molname = self.mv.Mols[-1].name
adt = True
# if molname in self.mv.Mols.name :
# self.hostApp.driver.duplicatemol=True
# if VERBOSE :print self.hostApp.driver.duplicatemol
if self.epmv.useModeller and not adt:
import modeller
from modeller.scripts import complete_pdb
mdl = complete_pdb(self.epmv.env, name)
mdl.patch_ss()
name = name.split(".pdb")[0] + "m.pdb"
mdl.write(file=name)
if not adt:
print ("not adt read " + str(name))
self.mv.readMolecule(str(name))
if self.epmv.useModeller:
self.mv.Mols[-1].mdl = mdl
if self.epmv.center_mol:
self.mv.Mols[-1].pmvaction.updateModellerCoord(0, mdl)
molname = self.mv.Mols[-1].name
molname = molname.replace(".", "_")
self.mv.Mols[-1].name = molname
# if len(molname) > 7 :
# self.mv.Mols[-1].name=molname[0:6]
# molname = self.mv.Mols[-1].name
# self.epmv.testNumberOfAtoms(self.mv.Mols[-1])
self.getData(self.mv.Mols[-1].name, adt=adt)
# if self.host != "3dsmax" :
self.startingRepresentation()
self.updateViewer()
def loadRecentFile(self, *args):
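# hosts deliver the menu id either bare, as (id,), or as (id, extra);
# normalize it before looking up the file path in self.submenu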
# print "RF ",args, len(args)
if len(args) == 1:
if type(args[0]) is tuple:
id = args[0][0]
else:
id = args[0]
elif len(args) == 2:
if type(args[0]) is str:
id = args[0]
else:
id = args[0][0]
if self.submenu is not None:
filename = self.submenu[str(id)]["name"]
self.loadPDB(filename)
def browsePDB(self, *args):
# first need to call the ui fileDialog
try:
self.fileDialog(label="choose a file", callback=self.loadPDB)
except:
self.drawError()
def savePDB_cb(self, filename):
if filename is None:
return
self.drawSubDialog(self.saveGui, 2555441)
self.saveGui.setString(self.saveGui.FILE, filename)
self.saveGui.filename = filename
def savePDB(self, *args):
self.saveDialog(label="choose a file", callback=self.savePDB_cb)
def fetchPDB(self, *args):
name = str(self.getString(self.EDIT_TEXT))
type = self.pdbtype[self.getLong(self.COMB_BOX["pdbtype"])]
self.fetchPDB_cb(name, type)
def fetchPDB_cb(self, name, type):
dbtype = {'PDB': "Protein Data Bank (PDB)",
'TMPDB': "Protein Data Bank of Transmembrane Proteins (TMPDB)",
'OPM': "Orientations of Proteins in Membranes (OPM)",
'CIF': "Crystallographic Information Framework (CIF)",
'PQS': "PQS"}
if len(name) == 4 or len(name.split(".")[0]) == 4:
# print ("PDB id, webdownload ",name)
molname = str(name.lower())
# if molname in self.mv.Mols.name : self.mv.hostApp.driver.duplicatemol=True
self.mv.fetch.db = dbtype[type]
# print self.epmv.forceFetch
mol = self.mv.fetch(molname, f=self.epmv.forceFetch)
# print "fetch ",mol
if mol is None:
return True
self.epmv.testNumberOfAtoms(self.mv.Mols[-1])
self.getData(self.mv.Mols[-1].name)
self.updateViewer()
self.startingRepresentation()
else:
print(("enter a Valid " + type + " id Code "))
def loadDATA(self, filename, model=False, trajname=None, molname=None, adt=False):
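# dispatch: model/conformation data (model=True or *.model/*.dlg), gromacs
# trajectories (*.xtc/*.dcd), or grid/volume data; trajname is only set
# when restoring a saved session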
if trajname is None:
if model:
# if self.host != "3dsmax" :
self.modelData(adt=adt)
# else :
# self.modelData_cb(adt=adt)
return True
# filename=self.GetString(self.trajectoryfile)
# if len(filename) == 0 :
if filename is None:
return True
dataname = os.path.splitext(os.path.basename(filename))[0]
extension = os.path.splitext(os.path.basename(filename))[1] # .xtc,.trj,etc..
if extension == '.xtc' or extension == '.dcd':
self.gromacsTraj(file=filename, dataname=dataname + extension)
else:
self.gridData(file=filename, dataname=dataname + extension)
# elif extension == '.map' : self.gridData_1(file=filename)
else:
print(("restore ", trajname))
if trajname.find(".model") != -1 or trajname.find(".dlg") != -1: # model conformation data
self.modelData(dataname=trajname, molname=molname)
elif trajname.find("xtc") != -1: # gromacs conformation data
self.gromacsTraj(dataname=trajname, molname=molname)
else: # autodock map conformation data
self.gridData(dataname=trajname, molname=molname)
# elif extension == '.trj' : self.amberTraj(filename)
self.updateViewer()
return True
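# A minimal usage sketch of the routing above (hypothetical file paths; it
# assumes a molecule is already loaded so the data can be attached to it):
# '.xtc'/'.dcd' extensions go to gromacsTraj, everything else falls through
# to gridData.
#   self.loadDATA("/tmp/md_run.xtc")   # -> gromacsTraj(file=..., dataname="md_run.xtc")
#   self.loadDATA("/tmp/pot.dx")       # -> gridData(file=..., dataname="pot.dx")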
def browseDATA(self, *args):
# first need to call the ui fileDialog
self.fileDialog(label="choose a data file", callback=self.loadDATA)
def modelData_cb(self, dataname=None, molname=None, adt=False):
if molname is None:
mname = self.current_mol.name
trajname = mname + '.model'
if adt:
trajname = mname + '.dlg'
self.mv.iMolData[mname].append(trajname)
else:
mname = molname
trajname = dataname
# self.addItemToPMenu(self.COMB_BOX["dat"],trajname)
n = len(self.mv.iTraj) + 1
self.mv.iTraj[n] = [trajname, "model"]
self.current_traj = [trajname, "model"]
def modelData(self, dataname=None, molname=None, adt=False):
if molname is None:
val = self.getLong(self.COMB_BOX["mol"])
vname = self.COMB_BOX["mol"]["value"][val]
mname, mol = self.epmv.getMolName(vname)
trajname = mname + '.model'
if adt:
trajname = mname + '.dlg'
# self.mv.iMolData[mname].append(trajname)
else:
mname = molname
trajname = dataname
# self.modelData_cb(mname,trajname,dataname=dataname,molname=molname,adt=adt)
self.addItemToPMenu(self.COMB_BOX["dat"], trajname)
self.mv.iTraj[len(self.COMB_BOX["dat"]["value"]) - 1] = [trajname, "model"]
self.current_traj = [trajname, "model"]
def gromacsTraj(self, file=None, dataname=None, molname=None):
if molname is None:
try:
val = self.getLong(self.COMB_BOX["mol"])
vname = self.COMB_BOX["mol"]["value"][val]
molname, mol = self.epmv.getMolName(vname)
nData = len(self.COMB_BOX["dat"]["value"]) + 1
except:
mname = ""
mol = self.current_mol
if mol is not None:
molname = self.current_mol.name
nData = len(self.mv.iTraj) + 1
trajname = self.gromacsTraj_cb(file=file, dataname=dataname,
molname=molname, mol=mol, nData=nData)
self.addItemToPMenu(self.COMB_BOX["dat"], trajname)
def gromacsTraj_cb(self, file=None, dataname=None, molname=None, mol=None, nData=1):
print(file)
self.mv.openTrajectory(file, log=0)
if molname is None:
trajname = os.path.basename(file)
print(trajname)
mname = molname
# val = self.getLong(self.COMB_BOX["mol"])
# vname = self.COMB_BOX["mol"]["value"][val]
# mname,mol=self.epmv.getMolName(vname)
# print (vname,mname,mol,trajname)
self.mv.iMolData[mname].append(trajname)
else:
mname = molname
trajname = dataname
self.mv.playTrajectory(mname, trajname, log=0)
print(trajname)
print((self.mv.Trajectories))
self.mv.iTraj[nData - 1] = [self.mv.Trajectories[trajname], "traj"]
self.current_traj = [self.mv.Trajectories[trajname], "traj"]
self.nF = len(self.current_traj[0].coords)
return trajname
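# Bookkeeping note (inferred from the assignments above): self.mv.iTraj maps
# a data-menu index to a [data_object, kind] pair, e.g.
#   {0: [<Trajectory>, "traj"], 1: [<Grid3D>, "grid"]}   (illustrative entries)
# self.current_traj mirrors the active entry and self.nF caches the frame count.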
def gridData_cb(self, file=None, dataname=None, molname=None, mol=None, nData=1):
mname = ""
self.mv.readAny(file)
name = list(self.mv.grids3D.keys())[-1]
if dataname is not None:
name = dataname
print ("gridData_cb ", file, dataname, molname, name)
# the last read data. This is a dictionary, so the last read entry is not necessarily at index -1
# print list(self.mv.grids3D.keys())
# if molname == None :
# sys.stderr.write('DObe')
# if mol is not None:
# self.mv.cmol = mol
# mname = mol.name
# else:
# standalone data
mname = ""
mol = None
gmname = "grid_" + str(len(self.mv.grids3D.keys())) + "IsoSurface"
# sys.stderr.write('before Select and isoContour')
# else :
# mname = molname
# trajname = dataname
self.mv.isoC.select(grid_name=name)
self.mv.isoC(self.mv.grids3D[name], name=gmname,
isovalue=0.) # self.mv.grids3D[name].mean)
trajname = name # os.path.basename(filename)
self.setReal(self.SLIDERS["datS"], 0.)
# print trajname
if mname == "":
if "IsoSurface" not in list(self.mv.iMolData.keys()):
self.mv.iMolData["IsoSurface"] = []
mname = "IsoSurface"
self.mv.iMolData[mname].append(file)
self.current_traj = self.mv.iTraj[nData - 1] = [self.mv.grids3D[trajname], "grid"]
self.nF = self.current_traj[0].maxi
# self.mv.playTrajectory(mname, trajname, log=0)
print ("nData", nData - 1)
# print trajname
# print "grid"
return trajname
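# Flow sketch for the grid path above: readAny() registers the volume in
# self.mv.grids3D, isoC() builds an initial isosurface at isovalue 0., and a
# standalone grid (no parent molecule) is filed under the pseudo-molecule
# "IsoSurface" in self.mv.iMolData.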
def gridData(self, file=None, dataname=None, molname=None):
if molname is None:
try:
val = self.getLong(self.COMB_BOX["mol"])
vname = self.COMB_BOX["mol"]["value"][val]
print((val, vname, self.COMB_BOX["mol"]["value"]))
mname, mol = self.epmv.getMolName(vname)
# self.mv.cmol = mol
except:
# standalone data
mname = ""
mol = self.current_mol
if mol is not None:
molname = self.current_mol.name
nData = len(self.COMB_BOX["dat"]["value"]) + 1
trajname = self.gridData_cb(file=file, dataname=dataname, molname=molname, mol=mol, nData=nData)
self.addItemToPMenu(self.COMB_BOX["dat"], os.path.basename(trajname))
self.updateTraj()
def updateTraj(self, *args):
i = self.getLong(self.COMB_BOX["dat"])
if i not in list(self.mv.iTraj.keys()):
return False
self.current_traj = self.mv.iTraj[i]
mini, maxi, default, step = self.epmv.updateTraj(self.current_traj)
print((mini, maxi, default, step))
self.setVal(self.LABEL["datastat"], "data min:" + str(mini) + " max:" + str(maxi))
# print (self.SLIDERS["datS"]["value"])
self.updateSlider(self.SLIDERS["datS"], mini, maxi, default, step)
# should we update the attribute if any for timeline purpose
if self.epmv.synchro_timeline:
# update maya attrbutes
if self.host == "maya":
self.epmv.updateDataPlayerAttr(mini, maxi, default, step)
return True
def applyState(self, *args):
mname, mol, sel, selection = self.getDsInfo()
val = self.getReal(self.SLIDERS["datS"])
traj = self.current_traj
if traj is None:
# get from the pull-down menu
self.updateTraj(None)
traj = self.current_traj
# disp=[]
# if mol is not None :
# disp = self.mv.molDispl[mname]
# else :
i = self.getLong(self.COMB_BOX["dat"])
mname = "grid_" + str(i)
self.applyState_cb(mname, mol, sel, selection, val, traj=traj)
def applyState_cb(self, mname, mol, sel, selection, val, traj=None):
if traj is None:
traj = self.current_traj
print ("applyState_cb", traj)
print ("iTraj", self.mv.iTraj)
if traj is not None:
if traj[1] in ["model", "traj"]:
conf = val # self.getReal(self.SLIDERS["datS"])
self.epmv.updateData(traj, int(conf))
# if "surf" in disp:
# if disp["surf"] : self.updateMSMS(None) #shoudl I redo the coloring?
# if "cms" in disp :
# if disp["cms"] : self.updateCMS(None)
if self.epmv.updateColor: # and self.host != "3dsmax":
self.color(None)
elif self.current_traj[1] == "grid":
iso = val # self.getReal(self.SLIDERS["datS"])#isovalue
self.mv.isoC(self.current_traj[0], isovalue=iso, name=mname + "IsoSurface")
elif hasattr(self.current_traj, 'GRID_DATA_FILE'):
# grid
iso = val # self.getReal(self.SLIDERS["datS"])#isoself.GetReal(self.slider)
self.mv.setIsovalue(self.current_traj[0].name, iso, log=1)
self.updateViewer()
# return True
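# Slider semantics (as read from the branches above): for "model"/"traj"
# data the datS slider value is an integer conformation/frame index, while
# for "grid" data the very same slider drives the isosurface isovalue.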
def getDsInfo(self, key=None):
try:
name = self.getVal(self.COMB_BOX["mol"])
# val = self.getLong(self.COMB_BOX["mol"])
# name = str(self.COMB_BOX["mol"]["value"][val])
except:
return None, None, None, None
display = None
if key is not None:
if key != "col":
display = self.getBool(self.CHECKBOXS[key])
else:
display = self.getColor(self.COLFIELD)
selString = str(self.getString(self.SELEDIT_TEXT))
return self.getDsInfo_cb(name, selString, display, key=key)
def getDsInfo_cb(self, name, selString, display, key=None):
if name is None or name == "":
return None, None, None, None
print ("getDsInfo_cb " + name, type(name), selString)
if name not in list(self.mv.selections.keys()):
mol = self.getSelectionMol(name)
if mol is None:
return None, None, None, None
mname = str(mol.name)
sel = str(self.mv.selections[mname][name])
selection = self.mv.select(str(sel),
negate=False, only=True, xor=False,
log=0, intersect=False)
self.epmv.current_selection = selection
if key is not None:
print ("ok key ", key, display)
if key != "col":
self.updateMolDsDict(sel, name, display, key)
return name, mol, sel, selection, display
return name, mol, sel, selection
mname, mol = self.epmv.getMolName(name) # just in case name is a selection..
mname = str(mname)
mol.name = str(mol.name)
print((mname, mol))
sel, selection = self.epmv.getSelectionLevel(mol, str(selString))
self.epmv.current_selection = selection
if key is not None:
# print "ok key ",key
if key != "col":
self.updateMolDsDict(sel, mname, display, key)
return mname, mol, sel, selection, display
return mname, mol, sel, selection
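# Return contract (a sketch of the two shapes used throughout this file):
# without a key -> (mname, mol, sel, selection); with a key ->
# (mname, mol, sel, selection, display), where display is a bool for the
# CHECKBOXS entries and a colour value when key == "col".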
def updateSelection(self, *args):
display = self.getBool(self.TOGGLESEL)
if not display:
return
try:
val = self.getLong(self.COMB_BOX["mol"])
name = self.COMB_BOX["mol"]["value"][val]
except:
return None, None, None, None
mname, mol = self.epmv.getMolName(name)
selString = self.getString(self.SELEDIT_TEXT)
# print selString
sel, selection = self.epmv.getSelectionLevel(mol, selString)
# print (sel,selection,selString)
if selection is None:
newC = []
else:
newC = selection.coords
sel_particle = self.epmv.helper.getParticles("epmv_sel") # maya gave shape,particle
if sel_particle is None:
sel_particle = self.epmv.helper.particle("epmv_sel", newC, display="cross")
self.epmv.helper.toggleXray(sel_particle, True)
else:
# self.epmv.helper.setCurrentSelection(sel_particle)
self.epmv.helper.updateParticles(newC, PS=sel_particle)
# self.updateViewer()
def toggleSelection(self, *args):
display = self.getBool(self.TOGGLESEL)
if display:
self.updateSelection()
sel = self.epmv.helper.getParticles("epmv_sel")
if sel is not None:
self.epmv.helper.toggleDisplay(sel, display)
else:
sel = self.epmv.helper.getParticles("epmv_sel")
if sel is not None:
self.epmv.helper.updateParticles([], PS=sel)
self.epmv.helper.toggleDisplay(sel, display)
def setCurMol(self, *args):
mname, mol, sel, selection = self.getDsInfo()
if mol is None:
return
# restore display option
doDisplay = False
for k in self.CHECKBOXS:
# print (k,self.mv.molDispl[mname][k])
if self.host != "blender25": # ?
# this actually triggers the event, and sets it to True!
self.setBool(self.CHECKBOXS[k], self.mv.molDispl[mname][k])
if self.mv.molDispl[mname][k]:
if self.host == "blender25": self.setBool(self.CHECKBOXS[k], self.mv.molDispl[mname][k])
doDisplay = True
# #restore color option
color = self.mv.molDispl[mname]["col"]
if type(color) is int: # functId
# here is a problem
self.setLong(self.COMB_BOX["col"], color)
elif color is None:
pass
else:
self.setColor(self.COLFIELD, color)
if mname not in self.mv.Mols.name:
# selection
self.setString(self.SELEDIT_TEXT, self.mv.MolSelection[mol.name][mname])
else:
self.setString(self.SELEDIT_TEXT, "")
# if True in self.molDispl[mname]: self.doDisplaySelection(self.molDispl[mname],private)
# print "update"
# should apply the ds... or not? Let's try
self.current_mol = mol
if doDisplay:
self.doDisplay(self.mv.molDispl[mname])
self.updateViewer()
return None
def updateMolDsDict(self, sel, mname, display, key):
# if sel == mname :
# self.mv.molDispl[mname][key]= display
# else :
# self.mv.molDispl[mname][key] = False
self.mv.molDispl[mname][key] = display
def getSelectionMol(self, selname):
# print "getSelectionMol "+selname+"XX"
for mol in self.mv.Mols:
# print mol.name,self.mv.MolSelection[mol.name]
for molselname in self.mv.MolSelection[mol.name]:
# print "molselname "+molselname
if molselname == selname:
return mol
return None
def edit_Selection(self, *args):
edit = self.getLong(self.COMB_BOX['selection'])
if edit == 0: # add selection
self.add_Selection()
elif edit == 1: # rename
self.rename_Selection()
elif edit == 2: # delete
self.delete_Selection()
return None
def delete_Selection_cb(self, res):
if res:
val = self.getLong(self.COMB_BOX["mol"])
name = self.COMB_BOX["mol"]["value"][val]
# name should be the current selection name if not return
mol = self.getSelectionMol(name)
if mol is None:
return
mname = mol.name
selname = name
del self.mv.MolSelection[mname][selname]
del self.mv.selections[mname][selname]
del self.mv.molDispl[selname]
self.resetPMenu(self.COMB_BOX["mol"])
self.restoreMolMenu()
def delete_Selection(self):
val = self.getLong(self.COMB_BOX["mol"])
name = self.COMB_BOX["mol"]["value"][val]
# name should be the current selection name if not return
mol = self.getSelectionMol(name)
if mol is None:
return
mname = mol.name
selname = name
question = "Are You sure you want to delete the current selection " + selname + " of molecule " + mname + "?"
self.drawQuestion("delete Selection", question=question, callback=self.delete_Selection_cb)
def rename_Selection(self):
self.drawInputQuestion(title="rename current selection",
question="Give the new name", callback=self.rename_Selection_cb)
def rename_Selection_cb(self, newname):
# whats the new name
# newname=
val = self.getLong(self.COMB_BOX["mol"])
name = self.COMB_BOX["mol"]["value"][val]
# name should be the current selection name if not return
mol = self.getSelectionMol(name)
if mol is None:
return
mname = mol.name
selname = name
# change the name in dic of indice
# change the name in the mol dictionary of selection
sel = self.mv.MolSelection[mname][selname]
dsDic = self.mv.molDispl[selname]
del self.mv.MolSelection[mname][selname]
del self.mv.selections[mname][selname]
del self.mv.molDispl[selname]
self.mv.MolSelection[mname][newname] = sel
self.mv.selections[mname][newname] = sel
self.mv.molDispl[newname] = dsDic
self.resetPMenu(self.COMB_BOX["mol"])
self.restoreMolMenu()
def add_Selection(self, n=None):
newSelection = True
print("add_selection")
if n is not None:
# restore mode
for selname in list(self.mv.MolSelection[n].keys()):
self.addItemToPMenu(self.COMB_BOX["mol"], str(selname))
# self.mv.selections[n][str(selname)]=self.mv.MolSelection[n]
return True
mname, mol, sel, selection = self.getDsInfo()
print(("mname ", mname))
print(("sel ", sel))
if mname not in self.mv.selections:
selname = mname
newSelection = False
else:
selname = mol.name + "_Selection" + str(len(self.mv.MolSelection[mol.name]))
print(("selname ", selname))
print((self.mv.MolSelection[mol.name]))
self.mv.MolSelection[mol.name][selname] = sel
self.mv.selections[mol.name][selname] = sel
self.mv.molDispl[selname] = {}
for k in ["cpk", "bs", "ss", "loft", "arm", "spline", "surf", "cms", "meta"]:
self.mv.molDispl[selname][k] = False
self.mv.molDispl[selname] = self.mv.molDispl[mol.name].copy()
for k in ["cpk", "bs", "ss", "loft", "arm", "spline", "surf", "cms", "meta"]:
self.mv.molDispl[mol.name][k] = False
# for k in self.CHECKBOXS:
# self.mv.molDispl[selname][k]=self.getBool(self.CHECKBOXS[k])
funcId = self.getLong(self.COMB_BOX["col"])
self.mv.molDispl[selname]["col"] = funcId
if funcId == 6:
# custom color
color = self.getColor(self.COLFIELD)
self.mv.molDispl[selname]["col"] = color
if newSelection:
self.addItemToPMenu(self.COMB_BOX["mol"], str(selname))
print((self.mv.molDispl[selname]))
# print str(self.indice_mol+self.indice)
return True
def getSelectionName(self, sel, mol):
for selname in list(self.mv.MolSelection[mol.name].keys()):
if sel == self.mv.MolSelection[mol.name][selname]:
return selname
return mol.name + "_Selection" + str(len(self.mv.MolSelection[mol.name]))
def restoreMolMenu(self):
# call this after flushing the combo box
print((self.mv.Mols))
for mol in self.mv.Mols:
self.addItemToPMenu(self.COMB_BOX["mol"], str(mol.name))
if mol.name in self.mv.selections:
for selname in list(self.mv.selections[mol.name].keys()):
self.addItemToPMenu(self.COMB_BOX["mol"], str(selname))
def setKeywordSel(self, *args):
key = self.keyword[self.getLong(self.COMB_BOX["keyword"])]
if key == 'keywords': key = ""
self.setString(self.SELEDIT_TEXT, key.replace(" ", ""))
self.updateSelection()
self.updateViewer()
def delete_Atom_Selection_cb(self, res):
if res:
mname, mol, sel, selection = self.getDsInfo()
self.epmv._deleteMolecule(mol)
# need to update the current_sel menu
self.resetPMenu(self.COMB_BOX["mol"])
self.restoreMolMenu()
def deleteAtomSet_cb(self, res):
if res:
mname, mol, sel, selection = self.getDsInfo()
self.mv.deleteAtomSet(selection)
def delete_Atom_Selection(self, *args):
# self.mv.deleteAtomSet...
mname, mol, sel, selection = self.getDsInfo()
if sel == mol.name:
# print sel,mname, mol , "del"
res = self.drawQuestion("Delete?", "Are You sure you want to delete " + mol.name,
callback=self.delete_Atom_Selection_cb)
print(res)
else:
res = self.drawQuestion("Delete?",
"Are You sure you want to delete the atoms of the current selection " + sel,
callback=self.deleteAtomSet_cb)
# connection to ePMV still to be done?
def delWater(self, *args):
mname, mol, sel, selection = self.getDsInfo()
if mol is None:
return
res = self.drawQuestion("Delete?",
"Are You sure you want to delete the water of the current selection " + mol.name)
if res:
self.mv.deleteWater(mol)
def delHydrogen(self, *args):
mname, mol, sel, selection = self.getDsInfo()
if mol is None:
return
res = self.drawQuestion("Delete?",
"Are You sure you want to delete the hydrogens of the current selection " + mol.name)
if res:
try:
self.mv.deleteHydrogens(mol)
except:
try:
self.mv.deleteAtomSet(mol.name + ":::H*")
except:
self.drawError("Problem while trying to remove the hydrogen. Check the consol")
def addHydrogen(self, *args):
mname, mol, sel, selection = self.getDsInfo()
if mol is None:
return
res = self.drawQuestion("Add?",
"Are You sure you want to add the hydrogens of the current selection " + mol.name)
if res:
self.mv.add_hGC(mol, renumber=1, polarOnly=0, method='noBondOrder')
# problem: does this update the atom representation? Need to do it first...
def doDisplay(self, disArray):
if disArray["cpk"]: self.dsCPK()
if disArray["bs"]: self.dsBS()
if disArray["ss"]: self.dsSS()
if disArray["loft"]: self.dsLoft()
if disArray["spline"]: self.dsSpline()
if disArray["surf"]: self.dsMSMS()
if disArray["cms"]: self.dsCMS()
if disArray["meta"]: self.dsMeta()
if disArray["arm"]: self.dsBones()
if disArray["col"] != None: self.color()
def dsLines(self, *args):
if self._timer:
t1 = time()
# print args
mname, mol, sel, selection, display = self.getDsInfo("lines")
if mol is None:
return
self.dsLines_cb(mname, mol, sel, selection, display)
def dsLines_cb(self, mname, mol, sel, selection, display):
if self._timer:
t1 = time()
# print args
if mol is None:
return
self.mv.displayLines(sel, negate=(not display))
ext = "_line"
if self.host == "c4d":
ext = "_lineds"
for ch in mol.chains:
obj = self.epmv.helper.getObject(ch.full_name() + ext) # lineds is for c4d..
self.epmv.helper.toggleDisplay(obj, display)
def dsPoints(self, *args):
if self._timer:
t1 = time()
# print args
mname, mol, sel, selection, display = self.getDsInfo("points")
if mol is None:
return
self.dsPoints_cb(mname, mol, sel, selection, display)
def dsPoints_cb(self, mname, mol, sel, selection, display):
if self._timer:
t1 = time()
# print args
if mol is None:
return
if selection is None:
selection = mol.allAtoms
parent = mol.geomContainer.masterGeom.obj
obj = self.epmv.helper.getObject(mol.name + "_cloudds")
if obj is None:
obj, meobj = self.epmv._PointCloudObject(mol.name + "_cloud",
vertices=selection.coords,
parent=parent)
self.epmv.helper.toggleDisplay(obj, display)
# self.mv.displayLines(selection)
# do we do this for every chain?
for ch in mol.chains:
obj = self.epmv.helper.getObject(ch.full_name() + "_cloudds")
if obj is None:
parent = mol.geomContainer.masterGeom.chains_obj[ch.name]
obj, meobj = self.epmv._PointCloudObject(ch.full_name() + "_cloud",
vertices=ch.residues.atoms.coords,
parent=parent)
self.epmv.helper.toggleDisplay(obj, display)
def dsCPK(self, *args):
mname, mol, sel, selection, display = self.getDsInfo("cpk")
if mol is None:
return
scale = self.getReal(self.SLIDERS["cpk"])
self.dsCPK_cb(mname, mol, sel, selection, display, scale)
def dsCPK_cb(self, mname, mol, sel, selection, display, scale):
if mol is None:
return
if self._timer:
t1 = time()
# should do some dialog here
# and what about the progress bar
if not mol.doCPK:
mol.doCPK = True # drawQuestion("Are you sure you want \nto display the CPK ("+str(len(mol.allAtoms))+" atoms) ","CPK")
if mol.doCPK:
self.mv.displayCPK(sel, log=0, negate=(not display),
scaleFactor=scale) # redraw?
# funcColor[ColorPreset2.val-1](molname, [name], log=1)
# self.updateViewer()
if self._timer:
print(("time ", time() - t1))
return True
def dsBS(self, *args):
mname, mol, sel, selection, display = self.getDsInfo("bs")
if mol is None:
return
ratio = self.getReal(self.SLIDERS["bs_r"])
scale = self.getReal(self.SLIDERS["bs_s"])
self.dsBS_cb(mname, mol, sel, selection, display, scale, ratio)
def dsBS_cb(self, mname, mol, sel, selection, display, scale, ratio):
if mol is None:
return
if self._timer:
t1 = time()
bRad = 0.3
cradius = float(bRad / ratio) * scale
if not mol.doCPK:
print((mol.doCPK))
mol.doCPK = self.drawQuestion(
"Are You sure you want \nto display the BallSticks for " + str(len(mol.allAtoms)) + " atoms",
"Balls and Sticks")
if mol.doCPK:
self.mv.displaySticksAndBalls(sel, bRad=0.3 * scale,
cradius=cradius, bScale=0.,
negate=(not bool(display)),
only=False, bquality=0,
cquality=0)
if self._timer:
print(("time ", time() - t1))
return True
def dsSS(self, *args):
mname, mol, sel, selection, display = self.getDsInfo("ss")
if mol is None:
return
self.dsSS_cb(mname, mol, sel, selection, display)
def dsSS_cb(self, mname, mol, sel, selection, display):
if mol is None:
return
attrch = hasattr(mol.chains[0], "secondarystructureset")
# print "hasattr(self.mv, 'secondarystructureset')",hasattr(self.mv, 'secondarystructureset'),attrch
if not hasattr(self.mv, 'secondarystructureset') and not attrch:
# FIXME use self.molModVars to use PROSS or file info for SS
if mol.parser.hasSsDataInFile():
mod = "From File"
else:
mod = "From Pross"
if self.epmv.force_pross:
mod = "From Pross"
print ("compute secondary structure using ",mod)
self.mv.computeSecondaryStructure(mol.name, molModes={'%s' % mol.name: mod}, topCommand=0)
if self.epmv.uniq_ss:
self.mv.extrudeSecondaryStructureUnic(mol.name, topCommand=0,
log=0, display=0)
else:
self.mv.extrudeSecondaryStructure(mol.name, topCommand=0, log=0, display=0)
self.mv.displayExtrudedSS(sel, negate=(not bool(display)), only=False)
gname = "secondarystructure"
if self.epmv.uniq_ss:
gname = "SS"
if True in mol.chains.isDna:
self.mv.colorByResidueType(sel, [gname])
else:
self.mv.colorBySecondaryStructure(sel, [gname])
else:
self.mv.displayExtrudedSS(sel, negate=(not bool(display)), only=False)
return True
def drawBeadOption(self, *args):
self.drawSubDialog(self.beadUi, 2555558)
def dsBR(self, *args):
mname, mol, sel, selection, display = self.getDsInfo("bead")
if mol is None:
return
self.dsBR_cb(mname, mol, sel, selection, display)
def dsBR_cb(self, mname, mol, sel, selection, display):
if mol is None:
return
if display:
# open the display option menu
if self.epmv.uniq_ss:
self.mv.beadedRibbonsUniq(sel, createEvents=False)
else:
self.mv.beadedRibbons(sel, createEvents=False)
# self.drawBeadOption(None)
# else :
# #selection?
for ch in mol.chains:
obj = self.epmv.helper.getObject(mol.name + ch.name + "_beadedRibbon")
self.epmv.helper.toggleDisplay(obj, display)
return True
def dsCMS(self, *args):
mname, mol, sel, selection, display = self.getDsInfo("cms")
if mol is None:
return
iso = self.getReal(self.SLIDERS["cmsI"])
res = self.getReal(self.SLIDERS["cmsR"])
gridsize = self.getLong(self.SLIDERS["cmsG"])
self.dsCMS_cb(mname, mol, sel, selection, display, iso, res, gridsize)
def dsCMS_cb(self, mname, mol, sel, selection, display, iso, res, gridsize):
if mol is None:
return
# print mname,mol,sel,selection,display,iso,res,gridsize
name = 'CoarseMS_' + mname
parent = None
if hasattr(mol.geomContainer.masterGeom, "obj"):
parent = mol.geomContainer.masterGeom.obj
self.epmv.storeLastUsed(mname, "cms", {"iso": iso, "res": res, "gridsize": gridsize})
if iso == 0.:
return
if res == 0.:
return
if name not in mol.geomContainer.geoms:
geom = self.epmv.coarseMolSurface(mol, [gridsize, gridsize, gridsize],
isovalue=iso, resolution=res,
name=name)
mol.geomContainer.geoms[name] = geom
obj = self.epmv.helper.createsNmesh(name, geom.getVertices(), None,
geom.getFaces(), smooth=True, proxyCol=True)
self.epmv._addObjToGeom(obj, geom)
self.epmv.helper.addObjectToScene(self.epmv.helper.getCurrentScene(),
obj[0], parent=parent)
# if self.host!= "3dsmax" :
self.mv.colorByAtomType(mname, [name], log=0)
obj = obj[0]
else:
obj = mol.geomContainer.geoms[name].obj
# print "toggle",obj,type("obj")
self.epmv.helper.toggleDisplay(obj, display)
return True
def updateCMS(self, *args):
mname, mol, sel, selection, display = self.getDsInfo("cms")
if mol is None:
return
iso = self.getReal(self.SLIDERS["cmsI"])
res = self.getReal(self.SLIDERS["cmsR"])
gridsize = self.getLong(self.SLIDERS["cmsG"])
if res >= 0.0:
res = -0.001
self.setReal(self.SLIDERS["cmsR"], -0.001)
self.updateCMS_cb(mname, mol, sel, selection, display, iso, res, gridsize)
def updateCMS_cb(self, mname, mol, sel, selection, display, iso, res, gridsize):
if mol is None:
return
name = 'CoarseMS_' + mname
if display: # and name in list(mol.geomContainer.geoms.keys()):
parent = mol.geomContainer.masterGeom.obj
self.epmv.storeLastUsed(mname, "cms", {"iso": iso, "res": res, "gridsize": gridsize})
# isovalue=7.1#float(cmsopt['iso'].val),
# resolution=-0.3#float(cmsopt['res'].val)
if iso == 0.:
return
if res == 0.:
return
g = self.epmv.coarseMolSurface(selection, [gridsize, gridsize, gridsize],
isovalue=iso,
resolution=res,
name=name,
geom=mol.geomContainer.geoms[name])
self.epmv.helper.updateMesh(g.mesh, vertices=g.getVertices(),
faces=g.getFaces(), obj=name)
return True
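# Parameter sketch for the coarse surface (the 7.1 / -0.3 figures come from
# the commented defaults above; the gridsize of 32 is an illustrative
# assumption): iso ~ 7.1, res ~ -0.3 (negative values tune blobbiness),
# gridsize 32 -> a [32, 32, 32] sampling grid. Zero iso or res is rejected
# early since it would yield a degenerate surface.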
def dsMSMS(self, *args):
mname, mol, sel, selection, display = self.getDsInfo("surf")
print ("dsMSMS", mol)
if mol is None:
return
name = 'MSMS-MOL' + mname
pradius = self.getReal(self.SLIDERS["surf"])
density = self.getReal(self.SLIDERS["surfdensity"])
self.dsMSMS_cb(mname, mol, sel, selection, display, pradius, density)
def dsMSMS_cb(self, mname, mol, sel, selection, display, pradius, density):
if mol is None:
return
name = 'MSMS-MOL' + mname
display = bool(display)
hide = not display
# print "dsMSMS_cb",display,(not bool(display)),hide
self.epmv.storeLastUsed(mname, "surf", {"pradius": pradius, "density": density})
# print pradius,density
if pradius == 0.:
return
if density == 0.:
return
if name in mol.geomContainer.geoms:
# faster to just use the toggle here
print ("get " + name)
obj = self.epmv.helper.getObject(name)
print ("ret ", obj)
self.epmv.helper.toggleDisplay(obj, display)
# try:
# self.mv.displayMSMS(sel, negate=hide,
# only=False, surfName=name, nbVert=1)
# except :
# self.drawError("MSMS ERROR!")
else:
print ("compute")
self.mv.computeMSMS(sel, display=(bool(display)),
surfName=name, perMol=0,
pRadius=pradius, density=density)
# if self.host!= "3dsmax" :
self.mv.colorByAtomType(mname, [name], log=0)
# funcColor[ColorPreset2.val-1](molname, [name], log=1)
return True
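# Caching note: once 'MSMS-MOL'+mname exists in geomContainer.geoms the
# surface is only shown/hidden via toggleDisplay; computeMSMS (and the
# initial colorByAtomType) runs just once, on first display.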
def updateMSMS(self, *args):
mname, mol, sel, selection, display = self.getDsInfo("surf")
if mol is None:
return
name = 'MSMS-MOL' + mname
pradius = self.getReal(self.SLIDERS["surf"])
density = self.getReal(self.SLIDERS["surfdensity"])
self.updateMSMS_cb(mname, mol, sel, selection, display, pradius, density)
def updateMSMS_cb(self, mname, mol, sel, selection, display, pradius, density):
if mol is None:
return
name = 'MSMS-MOL' + mname
self.epmv.storeLastUsed(mname, "surf", {"pradius": pradius, "density": density})
if pradius == 0.:
return
if density == 0.:
return
if display and name in mol.geomContainer.geoms:
self.mv.computeMSMS(sel, # hdensity=msmsopt['hdensity'].val,
hdset=None,
density=density,
pRadius=pradius,
perMol=0, display=True,
surfName=name)
return True
def dsLoft(self, *args):
mname, mol, sel, selection, display = self.getDsInfo("loft")
i = self.getLong(self.COMB_BOX["bones"])
if mol is None:
return
self.dsLoft_cb(mname, mol, sel, selection, display, i)
def dsLoft_cb(self, mname, mol, sel, selection, display, i):
if mol is None:
return
for c in mol.chains:
name = "loft" + mol.name + "_" + c.name
loft = self.epmv.helper.getObject(name)
if c.ribbonType() == 'NA':
laders = self.epmv.helper.getObject("loft" + mol.name + c.name + "_lader")
if loft is None:
# self.getAtomsSelection(self.boneslevel[i],sel,selection,mol,chain=c)
if c.ribbonType() == 'NA':
lsel = c.residues.atoms.get("O5'").coords
else:
lsel = c.residues.atoms.get("CA").coords
parent = mol.geomContainer.masterGeom.chains_obj[c.name]
# parent = mol.geomContainer.masterGeom.obj
loft = self.epmv._makeRibbon(name, lsel, parent=parent)
# what about the ladder
if c.ribbonType() == 'NA':
# make the ladder
laders = self.epmv.NAlader("loft", mol, c, parent=parent)[0]
self.epmv.helper.toggleDisplay(loft, display)
if c.ribbonType() == 'NA':
# this doesn't work in blender
if laders is not None:
self.epmv.helper.toggleDisplay(laders, display)
return True
def dsSpline(self, *args):
mname, mol, sel, selection, display = self.getDsInfo("spline")
# mname,mol,sel,selection,display = self.getDsInfo("spline")
i = self.getLong(self.COMB_BOX["bones"])
if mol is None:
return
self.dsSpline_cb(mname, mol, sel, selection, display, i)
def dsSpline_cb(self, mname, mol, sel, selection, display, i):
if mol is None:
return
for c in mol.chains:
name = mol.name + "_" + c.name + "spline" # 'spline'+mol.name #
obSpline = self.epmv.helper.getObject(name)
if obSpline is None:
# lsel = self.getAtomsSelection(self.boneslevel[i],sel,selection,mol,chain=c)
if c.ribbonType() == 'NA':
lsel = c.residues.atoms.get("O5'")
else:
lsel = c.residues.atoms.get("CA")
parent = mol.geomContainer.masterGeom.chains_obj[c.name]
if isinstance(lsel, AtomSet):
obSpline, spline = self.epmv.helper.spline(name, lsel.coords,
scene=self.epmv.helper.getCurrentScene(),
parent=parent)
else:
obSpline, spline = self.epmv.helper.spline(name, lsel,
scene=self.epmv.helper.getCurrentScene(),
parent=parent)
# if c.ribbonType()=='NA':
# #make the ladder
# self.epmv.NAlader(mol,c)
self.epmv.helper.toggleDisplay(obSpline, display)
return True
def dsMeta(self, *args):
mname, mol, sel, selection, display = self.getDsInfo("meta")
if mol is None:
return
# make the metaballs
self.dsMeta_cb(mname, mol, sel, selection, display)
def dsMeta_cb(self, mname, mol, sel, selection, display):
if mol is None:
return
# make the metaballs
name = 'metaballs' + mol.name
metaballs = self.epmv.helper.getObject(name)
if metaballs is None:
# atoms = selection.allAtoms #or a subselection of surface atoms according sas
metaballsModifyer, metaballs = self.epmv._metaballs(name,
selection.coords,
selection.radius,
scn=self.epmv.helper.getCurrentScene(),
root=mol.geomContainer.masterGeom.obj)
else:
self.epmv.helper.toggleDisplay(metaballs, display)
return True
def updateCGeomList(self, *args):
mname, mol, sel, selection = self.getDsInfo()
if mol is None:
return
# get the list of custom geom in geomContainer
# reset the menu and fill it with new list.
self.resetPMenu(self.COMB_BOX["cgeom"])
self.customgeom = []
for gname in mol.geomContainer.geoms:
if gname[0:2] == "b_":
self.customgeom.append(gname[2:])
self.addItemToPMenu(self.COMB_BOX["cgeom"], str(gname[2:]))
def updateCGeomDisplay(self, *args):
mname, mol, sel, selection = self.getDsInfo()
if mol is None:
return
# display the current state of the object display.
gname = self.customgeom[self.getLong(self.COMB_BOX["cgeom"])]
if gname == "None":
self.setBool(self.CHECKBOXS["cgeom"], False)
return
visibility = self.epmv.helper.getVisibility(gname)
self.setBool(self.CHECKBOXS["cgeom"], visibility)
def dsCustomGeom(self, *args):
display = self.getBool(self.CHECKBOXS["cgeom"])
gname = self.customgeom[self.getLong(self.COMB_BOX["cgeom"])]
self.dsCustomGeom_cb(gname, display)
def dsCustomGeom_cb(self, gname, display):
if gname == "None" or gname is None:
return
o = self.epmv.helper.getObject(gname)
self.epmv.helper.toggleDisplay(o, display)
def getAtomsSelectionPrody(self, level, sel, selection, mol, chain=None):
# use mol.select
##boneslevel=["Trace","Backbone","Full Atoms","Domain","Chain","Mol","Selection"]
atlevel = {"Trace": ["CA", "O5'"],
"Backbone": ["N,CA,C,N", "P,O5',C5',C4',C3',O3'"],
"Full Atoms": ["all", "all"],
"Domain": ["CA", "P"], # how to define the domain
"Chain": ["ccenter", "ccenter"],
"Mol": ["mcenter", "mcenter"],
}
selString = str(self.getString(self.SELEDIT_TEXT))
lchain = mol.chains
if chain is not None:
lchain = [chain]
lsel = []
i = 0
if level == 'Mol':
lsel = [mol.getCenter(), ]
elif level == 'Chain':
lsel = []
for ch in lchain:
# get the center of the chain
lsel.append(util.getCenter(ch.residues.atoms.coords))
elif level == 'Domain':
# does the mol have domain information?
lsel = []
if not hasattr(mol, "hasDomains"):
domains = self.epmv.getDomains(mol)
if domains < 0:
return lsel
if mol.hasDomains:
# need to get the center of mass of each domain?
lres = self.epmv.getDomainsResiduesCoords(mol)
lsel = [util.getCenter(l) for l in lres]
elif level == 'Selection':
lsel = selection
else:
print ("level is ", level)
if selString == "":
# if sel == mol.name :
for ch in lchain:
if ch.ribbonType() == 'NA':
i = 1
lsel.extend(
mol.prodymodel.model.select("chain " + ch.name + " name " + atlevel[level][i]).getCoords())
# selection = ch.residues.atoms.get(atlevel[level][i])
# selection.sort()
# lsel.extend(selection)
else:
ch = selection.findParentsOfType(Chain)[0]
if ch.ribbonType() == 'NA':
i = 1
selection = selection.get(atlevel[level][i])
selection.sort()
lsel.extend(selection)
print("mySelection is ", lsel)
return lsel
def getAtomsSelection(self, level, sel, selection, mol, chain=None):
atlevel = {"Trace": ["CA", "O5'"],
"Backbone": ["N,CA,C,N", "P,O5',C5',C4',C3',O3'"],
"Full Atoms": ["all", "all"],
"Domain": ["CA", "P"], # how to define the domain
"Chain": ["ccenter", "ccenter"],
"Mol": ["mcenter", "mcenter"],
}
lsel = AtomSet()
lchain = mol.chains
if chain is not None:
lchain = [chain]
i = 0
# check selection,check if dna
if level == 'Mol':
lsel = [mol.getCenter(), ]
elif level == 'Chain':
lsel = []
for ch in lchain:
# get the center of the chain
lsel.append(util.getCenter(ch.residues.atoms.coords))
elif level == 'Domain':
# does the mol have domain information?
lsel = []
if not hasattr(mol, "hasDomains"):
domains = self.epmv.getDomains(mol)
if domains < 0:
return lsel
if mol.hasDomains:
# need to get the center of mass of each domain?
lres = self.epmv.getDomainsResiduesCoords(mol)
lsel = [util.getCenter(l) for l in lres]
elif level == 'Selection':
lsel = selection
else:
if sel == mol.name:
for ch in lchain:
if ch.ribbonType() == 'NA':
i = 1
selection = ch.residues.atoms.get(atlevel[level][i])
selection.sort()
lsel.extend(selection)
else:
ch = selection.findParentsOfType(Chain)[0]
if ch.ribbonType() == 'NA':
i = 1
selection = selection.get(atlevel[level][i])
selection.sort()
lsel.extend(selection)
print("mySelection is ", lsel)
return lsel
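# Worked example of the atlevel table above: for a protein chain at level
# "Trace" the selection gathers CA atoms; for a nucleic-acid chain
# (ribbonType 'NA') the index flips to 1, so "Trace" gathers O5' atoms and
# "Backbone" the "P,O5',C5',C4',C3',O3'" set, while "Chain"/"Mol" reduce to
# centres of geometry instead of atom sets.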
def dsBones(self, *args):
# boneslevel=["Trace","Backbone","Full Atoms","Domain","Chain","Mol","Selection"]
mname, mol, sel, selection, display = self.getDsInfo("arm")
if mol is None:
return
name = mname + "_Armature"
armObj = self.epmv.helper.getObject(name)
i = self.getLong(self.COMB_BOX["bones"])
print("dsBones", name, self.boneslevel[i], mol)
# return
atlevel = "CA"
if armObj is None:
# level-based getAtomsSelection gave issues
if self.host == "blender25" and self.epmv._prody:
if not hasattr(mol, "prodymodel") or mol.prodymodel is None:
from ePMV.extension._prody._prody import _prodymodel
mol.prodymodel = _prodymodel(mol.parser.filename, center=self.epmv.center_mol)
lsel = self.getAtomsSelectionPrody(self.boneslevel[i], sel, selection, mol)
else:
lsel = self.getAtomsSelection(self.boneslevel[i], sel, selection, mol)
if isinstance(lsel, AtomSet):
object, bones = self.epmv._armature(name, lsel,
scn=self.epmv.helper.getCurrentScene(),
root=mol.geomContainer.masterGeom.obj)
# mode=self.boneslevel[i])
else:
object, bones = self.epmv._armature(name, lsel, coords=lsel,
scn=self.epmv.helper.getCurrentScene(),
root=mol.geomContainer.masterGeom.obj)
mol.geomContainer.geoms["armature"] = [object, bones, lsel]
else:
# how to update > delete and recreate ?
self.epmv.helper.toggleDisplay(armObj, display)
return True
def custom_color(self, *args):
mname, mol, sel, selection, color = self.getDsInfo("col")
if mol is None:
return
lGeom = self.getGeomActive(mname)
self.setLong(self.COMB_BOX["col"], 6) # customcolor
self.custom_color_cb(mname, mol, sel, selection, color, lGeom)
def custom_color_cb(self, mname, mol, sel, selection, color, lGeom, *args):
if mol is None:
return
self.mv.molDispl[mname]["col"] = color
self.funcColor[7](selection, [color], lGeom, log=1)
def color(self, *args):
# print self.funcColor
mname, mol, sel, selection, color = self.getDsInfo("col")
if mol is None:
return
lGeom = self.getGeomActive(mname)
funcId = self.getLong(self.COMB_BOX["col"])
self.color_cb(mname, mol, sel, selection, color, lGeom, funcId)
def color_cb(self, mname, mol, sel, selection, color, lGeom, funcId, *args):
if mol is None:
return
if "SS" in lGeom:
lGeom.pop(lGeom.index("SS"))
for ch in mol.chains:
name = "SS%s" % (ch.id)
lGeom.append(name)
if funcId == 7:
# custom color
self.mv.molDispl[mname]["col"] = color
self.funcColor[7](selection, [color], lGeom, log=1)
elif funcId == 8 or funcId == 9 or funcId == 10:
# color by properties , ie NtoC, Bfactor, SAS
self.mv.colorByProperty.level = 'Atom'
if funcId == 8:
# what about chain selection
maxi = max(selection.number) # selection[-1].number
mini = min(selection.number) # selection[0].number
property = 'number'
elif funcId == 9:
maxi = max(selection.temperatureFactor)
mini = min(selection.temperatureFactor)
property = 'temperatureFactor'
elif funcId == 10:
if not hasattr(selection, "sas_area"):
try:
self.mv.computeSESAndSASArea(mol)
except:
self.drawError("Problem with mslib")
maxi = max(selection.sas_area)
mini = min(selection.sas_area)
property = 'sas_area'
# print ("color",len(selection),property)
self.funcColor[8](selection, lGeom, property, mini=float(mini),
maxi=float(maxi), propertyLevel='Atom',
colormap='rgb256')
self.mv.molDispl[mname]["col"] = funcId
else:
self.funcColor[funcId](selection, lGeom)
self.mv.molDispl[mname]["col"] = funcId
for geom in lGeom:
if geom.find("CoarseMS_") != -1:
self.epmv.storeLastUsed(mname, "cms", {"colorMod": funcId})
elif geom.find("MSMS") != -1:
self.epmv.storeLastUsed(mname, "surf", {"colorMod": funcId})
def drawPreset(self, *args):
# To finish and define/...
mname, mol, sel, selection = self.getDsInfo()
if mol is None:
return
# self.presettype=['available presets:',' Lines',' Liccorice',' SpaceFilling',
# ' Ball+Sticks',' RibbonProtein+StickLigand',
# ' RibbonProtein+CPKligand',' xray',' Custom',
# ' Save Custom As...']
# load,edit save representation preset
preset = self.presettype[self.getLong(self.COMB_BOX["preset"])]
print(preset)
if preset.strip() == 'Liccorice':
# displayBS as licorice which is simply ratio == 1.0
# set the ratio and do the command
self.setReal(self.SLIDERS["bs_r"], 1.0)
self.setBool(self.CHECKBOXS["bs"], True)
self.dsBS()
elif preset.strip() == 'xray':
# ??
pass
elif preset.strip() == 'Lines':
self.mv.displayLines(selection)
elif preset.strip() == 'Ball+Sticks':
self.setReal(self.SLIDERS["bs_r"], 1.5)
self.setBool(self.CHECKBOXS["bs"], True)
self.dsBS()
elif preset.strip() == 'SpaceFilling':
self.setReal(self.SLIDERS["cpk"], 1.)
self.setBool(self.CHECKBOXS["cpk"], True)
self.dsCPK()
elif preset.strip() == 'RibbonProtein+StickLigand':
# need to check if ligand exist
ligand = self.mv.select(str(mol.name + "::ligand:"),
negate=False, only=True, xor=False,
log=0, intersect=False)
if not len(ligand):
return
# 1 select protein
p = self.mv.select(str(mol.name + "::aminoacids:"),
negate=False, only=True, xor=False,
log=0, intersect=False)
# 2 dsSS for protein
self.mv.displayExtrudedSS(p, negate=False, molModes={mname: 'From Pross'},
only=False, log=1)
self.mv.colorBySecondaryStructure(p, ["secondarystructure"])
self.setBool(self.CHECKBOXS["ss"], True)
# 3ds ligand Liccroice
ratio = self.getReal(self.SLIDERS["bs_r"])
scale = self.getReal(self.SLIDERS["bs_s"])
bRad = 0.3
cradius = float(bRad / ratio) * scale
self.mv.displaySticksAndBalls(ligand, bRad=0.3 * scale,
cradius=cradius, bScale=0.,
negate=False,
only=False, bquality=0,
cquality=0)
self.setBool(self.CHECKBOXS["bs"], True)
elif preset.strip() == 'RibbonProtein+CPKligand':
# need to check if ligand exist
ligand = self.mv.select(str(mol.name + "::ligand:"),
negate=False, only=True, xor=False,
log=0, intersect=False)
if not len(ligand):
return
# 1 select protein
p = self.mv.select(str(mol.name + "::aminoacids:"),
negate=False, only=True, xor=False,
log=0, intersect=False)
# 2 dsSS for protein
self.mv.displayExtrudedSS(p, negate=False, molModes={mname: 'From Pross'},
only=False, log=1)
self.mv.colorBySecondaryStructure(p, ["secondarystructure"])
self.setBool(self.CHECKBOXS["ss"], True)
# 3ds ligand CPK
scale = self.getReal(self.SLIDERS["cpk"])
self.mv.displayCPK(ligand, log=0, negate=False,
scaleFactor=scale, redraw=0) # redraw?
self.setBool(self.CHECKBOXS["cpk"], True)
# what are Custom and Save as?
def createTexture(self, *args):
mname, mol, sel, selection = self.getDsInfo()
if mol is None:
return
lGeom = self.getGeomActive(mname)
print(lGeom)
i = self.getLong(self.COMB_BOX["uv"])
print((self.uvselection[i]))
filename = self.getString(self.INPUTSTR["uv"])
surfName = self.getString(self.INPUTSTR["uvg"])
import math
# surfName="CoarseMS_ind"
surf = mol.geomContainer.geoms[surfName]
vertices = surf.getVertices()
faces = surf.getFaces()
colors = mol.geomContainer.getGeomColor(surf) # per vertex of per face...msms is per vertex
if colors is None:
if surfName in mol.geomContainer.atomPropToVertices:
func = mol.geomContainer.atomPropToVertices[surfName]
geom = mol.geomContainer.geoms[surfName]
atms = mol.geomContainer.atoms[surfName]
colors = func(geom, atms, 'colors', propIndex=surfName)
surfobj = self.epmv.helper.getObject(surf.obj)
print((len(faces), math.sqrt(len(faces))))
s = 20
sizex = math.sqrt(len(faces)) * (s + 1)
sizey = math.sqrt(len(faces)) * (s + 1)
print((sizex, sizey))
# mat = epmv.helper.createTexturedMaterial(surfName+"UV","/Users/ludo/uv.png")
# epmv.helper.assignMaterial(mat,surfobj,texture=True)
if self.uvselection[i] == "regular disposed triangle":
if self.host != 'maya':
mat = self.epmv.helper.getMaterial(surfName + "UV")
if mat is None:
mat = self.epmv.helper.createTexturedMaterial(surfName + "UV", filename)
self.epmv.helper.assignMaterial(mat, surfobj, texture=True)
self.epmv.helper.makeTexture(surfobj,
filename=filename, colors=colors,
sizex=sizex, sizey=sizey, faces=faces,
s=s, draw=True) # maya needs inversion.
# if uv already exists from automatic unwrapping :
else: # "unwrapped mesh UV"
self.epmv.helper.makeTextureFromUVs(surfobj,
filename=filename, colors=colors,
sizex=sizex, sizey=sizey,
s=s, draw=True)
def save_ePMVScript(self, *args):
# print (args)
ids = self.getLong(self.COMB_BOX['scriptS'])
# print (ids)
# print (self.current_script)
filename = None
if ids == 1:
filename = self.saveDialog(label="Save Python file as")
elif ids == 0:
filename = self.current_script
if filename is None:
return
text = self.getStringArea(self.SCRIPT_TEXT)
f = open(filename, "w")
f.write(text)
f.close()
# ids 0 -> Save
# ids 1 -> Save as
def set_ePMVScript(self, *args):
from ePMV import demo
dir = demo.__path__[0]
ids = self.getLong(self.COMB_BOX['scriptO'])
filename = None
if ids == 0: # Open..ask for broser
self.fileDialog(label="Open python file", callback=self.set_ePMVScript_cb)
else:
filename = dir + '/' + self.scriptliste[ids] + '.py'
self.set_ePMVScript_cb(filename)
def set_ePMVScript_cb(self, filename):
if filename:
try:
f = open(filename, 'r')
script = f.read()
f.close()
except:
script = "file :\n" + filename + " didnt exist !\n"
self.setStringArea(self.SCRIPT_TEXT, script)
self.current_script = filename
def execPmvComds(self, *args):
# first select the text
# cmds=pmvcmds.val
text = self.getStringArea(self.SCRIPT_TEXT) # getSelectTxt()
if text is not None:
cmds = text
# for l in text:
# cmds+=l+'\n'
# print len(cmds),cmds
exec (cmds, {'self': self.mv, 'epmv': self.epmv})
# self.updateViewer()
return True
def drawPreferences(self, *args):
# drawSubDialog
self.drawSubDialog(self.options, 2555554, callback=self.options.SetPreferences)
if self.host != "blender25": self.options.restorePreferences()
# in c4d asynchr but blender syncrho
def drawAbout(self, *args):
# doit=self.epmv.inst.checkForUpdate()
liste_plugin = {"upy": {"version_current": upy.__version__, "path": upy.__path__[0] + os.sep},
"ePMV": {"version_current": self.__version__, "path": ePMV.__path__[0] + os.sep}}
from upy.upy_updater import Updater
up = Updater(host="all", helper=self.helper, gui=self, liste_plugin=liste_plugin, typeUpdate="std")
up.readUpdateNote()
self.__about__ = "v" + self.__version__ + " of ePMV is installed.\nv" + up.result_json["ePMV"][
"version_std"] + " is available under Help/Check for Updates.\n\n"
self.__about__ += "v" + upy.__version__ + " of uPy is installed.\nv" + up.result_json["upy"][
"version_std"] + " is available under Help/Check for Updates.\n"
# self.__about__="ePMV v"+self.__version__+" latest v"+self.epmv.inst.newePMV+"\n"
# self.__about__+="uPy v"+upy.__version__+" latest v"+self.epmv.inst.newupy+"\n"
self.__about__ += """
ePMV by Ludovic Autin, Graham Johnson, Michel Sanner.
Developed in the Molecular Graphics Laboratory directed by Arthur Olson.
The Scripps Research Institute
http://epmv.scripps.edu"""
self.drawMessage(title='About ePMV', message=self.__about__)
def launchBrowser(self, *args):
import webbrowser
webbrowser.open(self.__url__[0])
def citationInformation(self, *args):
import webbrowser
webbrowser.open(self.__url__[2])
def checkUpdate_cb_cb(self, res):
doit = self.epmv.inst.checkForUpdate()
self.drawMessage(title='update ePMV',
message="ePMV will now be updated. Please be patient while the update is downloaded.")
self.epmv.inst.update(pmv=doit[0], epmv=doit[1], upy=doit[2], backup=res)
self.drawMessage(title='update ePMV', message="You are now up to date. Please restart " + self.host)
def checkUpdate_cb(self, res):
if res:
self.drawQuestion(question="Do you want to backup the current version?", callback=self.checkUpdate_cb_cb)
def check_update(self, *args):
# get current version
import Support
self.epmv.inst.current_version = self.__version__
self.epmv.inst.PMVv = Support.version.__version__
self.epmv.inst.upyv = upy.__version__
print("version ", self.epmv.inst.PMVv, self.epmv.inst.upyv)
doit = self.epmv.inst.checkForUpdate()
if True in doit:
# need some display?
msg = "An update is available.\nNotes:\n"
msg += "ePMV v" + self.__version__ + " latest is " + self.epmv.inst.newePMV + "\n"
msg += "uPy v" + upy.__version__ + " latest is " + self.epmv.inst.newupy + "\n"
msg += self.epmv.inst.update_notes + "\n"
msg += "Do you want to update?\n"
self.drawQuestion(question=msg, callback=self.checkUpdate_cb)
# if res :
# res = self.drawQuestion(question="Do you want to backup the current version?")
# self.drawMessage(title='update ePMV',message="ePMV will be now update. Please be patient whilethe update downloaded.")
# self.epmv.inst.update(pmv=doit[0],epmv=doit[1],upy=doit[2],backup=res)
# self.drawMessage(title='update ePMV',message="You are now up to date. Please restart "+self.host)
else:
self.drawMessage(title='update ePMV', message="You are up to date! no update need.")
def devCheckUpdate(self, *args):
# get current version
liste_plugin = {"upy": {"version_current": upy.__version__, "path": upy.__path__[0]},
"ePMV": {"version_current": self.__version__, "path": ePMV.__path__[0]}}
from upy.upy_updater import Updater
up = Updater(host="all", helper=self.helper, gui=self, liste_plugin=liste_plugin, typeUpdate="dev")
up.checkUpdate()
def stdCheckUpdate(self, *args):
# get current version
liste_plugin = {"upy": {"version_current": upy.__version__, "path": upy.__path__[0]},
"ePMV": {"version_current": self.__version__, "path": ePMV.__path__[0]}}
from upy.upy_updater import Updater
up = Updater(host="all", helper=self.helper, gui=self, liste_plugin=liste_plugin)
up.checkUpdate()
def joinSS(self, *args):
mname, mol, sel, selection = self.getDsInfo()
if mol is None:
return
for ch in mol.chains:
listeObj = []
if not hasattr(ch, "secondarystructureset"):
return
for elem in ch.secondarystructureset:
if hasattr(elem, "exElt"):
ex = elem.exElt
else:
continue
name = elem.name
# print ex,display,hasattr(ex,"obj")
obj = self.epmv.helper.getObject(mol.name + "_" + ch.name + "_" + name)
if obj is None:
continue
listeObj.append(obj)
self.epmv.helper.JoinsObjects(listeObj)
def addExtensionGUI(self, *args):
# should do a mini dialog asking to browse and propose the current
# supported extension excepting already set up extension
# should have a subdialog instead
question = "Enter the extension name follow by the directory,\nie 'modeller:/Library/modeller/modlib'"
self.drawInputQuestion(question=question, callback=self.epmv.inst.addExtension)
def drawPyAutoDock(self, *args):
# drawSubDialog
self.drawSubDialog(self.ad, 2555555) # in c4d asynchr but blender syncrho
def drawModellerGUI(self, *args):
# drawSubDialog
self.drawSubDialog(self.pd, 2555556, callback=self.pd.doIt) # in c4d asynchr but blender syncrho
def drawPalette(self, *args):
# drawSubDialog
self.drawSubDialog(self.pmvPalgui, 25555560) # in c4d asynchr but blender syncrho
def applyTransf(self, *args):
# drawSubDialog
self.drawSubDialog(self.applyPanel, 25553360)
def bindGeom(self, *args):
# drawSubDialog
self.drawSubDialog(self.bindPanel, 25553361)
def drawBuildDNA(self, *args):
self.drawSubDialog(self.dnaPanel, 555555555)
def CRYSTAL(self, geomname):
mname, mol, sel, selection = self.getDsInfo()
if mol is None:
return
self.drawCRYSTAL_cb(mname, mol, sel, selection, geomname)
def drawCrystal(self, *args):
# just ask for the name of the geometry we want to instantiate
self.drawInputQuestion(title="Build crystal packing",
question="Enter the name of the geometry you want to use, or leave empty to use selection",
callback=self.CRYSTAL)
def drawCRYSTAL_cb(self, mname, mol, sel, selection, geomname):
if not geomname:
# try to get current selection
geom = self.epmv.helper.getCurrentSelection()
if not len(geom): return
# else : geom= geom[0]
# geomname = self.epmv.helper.getName(geom[0])
# return
else:
geom = [self.epmv.helper.getObject(geomname), ]
if geom[0] is None:
return
print ("try to build for ", mol, geom)
# need to get the current mol
if not hasattr(mol, "crystal_pack"):
self.epmv.buildCrystal(mol, obj_to_instance=geom)
def BIOMT(self, geomname):
mname, mol, sel, selection = self.getDsInfo()
if mol is None:
return
self.drawBIOMT_cb(mname, mol, sel, selection, geomname)
def drawBIOMT(self, *args):
# just ask for the name of the geometry we want to instantiate
self.drawInputQuestion(title="Build biological unit",
question="Enter the name of the geometry you want to use, or select it",
callback=self.BIOMT)
def drawBIOMT_cb(self, mname, mol, sel, selection, geomname):
if not geomname:
# try to get current selection
geom = self.epmv.helper.getCurrentSelection()
if not len(geom): return
# else : geom= geom[0]
# geomname = self.epmv.helper.getName(geom[0])
# return
else:
geom = [self.epmv.helper.getObject(geomname), ]
if geom[0] is None:
return
# need to get the current mol
if not hasattr(mol, "biomat"):
mat = self.epmv.parse_PDB_BIOMT(mol.parser)
mol.biomat = mat
# now build the instance...
root = mol.geomContainer.masterGeom.obj
# create top parent for bioMT
name = mol.name + "bioMT"
parent = self.epmv.helper.getObject(name)
scene = self.epmv.helper.getCurrentScene()
if parent is None:
parent = self.epmv.helper.newEmpty(name)
self.epmv.helper.addObjectToScene(scene, parent)
for g in geom:
geomname = self.epmv.helper.getName(g)
geomInstParent = self.epmv.helper.getObject(geomname + "bioMT")
if geomInstParent is None:
geomInstParent = self.epmv.helper.newEmpty(geomname + "bioMT")
self.epmv.helper.addObjectToScene(scene, geomInstParent, parent=parent)
for symOpNum in mol.biomat:
nBiomolecule = 1
nrow = len(mol.biomat[symOpNum])
matsize = 3
if nrow > 3: # several unit
if (nrow % 3) == 0:
nBiomolecule = len(mol.biomat[symOpNum]) / 3
elif (nrow % 4) == 0:
nBiomolecule = len(mol.biomat[symOpNum]) / 4
matsize = 4
parent = geomInstParent
for biomol in range(int(nBiomolecule)):
index = (matsize * biomol)
if nBiomolecule > 1:
bioInstParent = self.epmv.helper.getObject(geomname + "bioMT" + "_bio" + str(biomol))
if bioInstParent is None:
bioInstParent = self.epmv.helper.newEmpty(geomname + "bioMT" + "_bio" + str(biomol))
self.epmv.helper.addObjectToScene(scene, bioInstParent, parent=geomInstParent)
parent = bioInstParent
geom_inst = self.epmv.helper.getObject(geomname + "_bio" + str(biomol) + "_" + str(symOpNum))
if geom_inst is None:
m = mol.biomat[symOpNum][index:index + matsize]
if len(m) == 3:
m.append([0., 0., 0., 1.]) # ?
if self.host == "maya":
geom_inst = self.epmv.helper.cmdInstance(
geomname + "_bio" + str(biomol) + "_" + str(symOpNum),
g, matrice=m, parent=parent, material=None)
else:
geom_inst = self.epmv.helper.newInstance(
geomname + "_bio" + str(biomol) + "_" + str(symOpNum),
g, matrice=m, parent=parent, material=None)
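# Worked example of the matrix bookkeeping above (hypothetical numbers):
# a BIOMT block stored as 8 rows gives nrow = 8, so matsize = 4 and
# nBiomolecule = 2; biomol 0 uses rows [0:4] and biomol 1 rows [4:8], each
# a 4x4 homogeneous transform (a 3-row matrix gets [0., 0., 0., 1.]
# appended to complete it).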
def drawPymolGUI(self, *args):
# drawSubDialog
# print("drawSubPyMol")
# if self.epmv._pymol :
if self.pymolgui is None:
# print("create pymol gui as a subDialog")
from ePMV.PyMol.pymolAdaptor import pymolGui
# exec('self.pymolgui = pymolGui()\n',{"pymolGui":pymolGui,"self":self})
self.pymolgui = pymolGui()
if self.pym is None:
from ePMV.PyMol.pymolAdaptor import pymolAdaptor
# print("createpymolAdaptor")
self.pym = pymolAdaptor(debug=0)
self.pymolgui.setup(sub=True, epmv=self.epmv, pym=self.pym)
self.drawSubDialog(self.pymolgui, 25555570)
def drawProdyGUI(self, *args):
self.drawSubDialog(self.prodygui, 25555570)
def modellerOptimize(self, *args):
import modeller
mname, mol, sel, selection = self.getDsInfo()
mdl = mol.mdl
# mdl = mol.mdl
# print(mname)
# Select all atoms:
atmsel = modeller.selection(mdl)
# Generate the restraints:
mdl.restraints.make(atmsel, restraint_type='stereo', spline_on_site=False)
# mdl.restraints.write(file=mpath+mname+'.rsr')
mpdf = atmsel.energy()
# print("before optmimise")
# Create optimizer objects and set defaults for all further optimizations
cg = modeller.optimizers.conjugate_gradients(output='REPORT')
mol.pmvaction.last = 10000
# print("optimise")
maxit = self.pd.getLong(self.pd.NUMBERS['miniIterMax'])
mol.pmvaction.store = self.pd.getBool(self.pd.CHECKBOXS['store'])
mol.pmvaction.redraw = self.pd.getBool(self.pd.CHECKBOXS['display'])
cg.optimize(atmsel, max_iterations=maxit, actions=mol.pmvaction) # actions.trace(5, trcfil))
del cg
mol.pmvaction.redraw = False
mol.allAtoms.setConformation(0)
return True
def modellerMD(self, *args):
import modeller
mname, mol, sel, selection = self.getDsInfo()
mdl = mol.mdl
# print(mname)
# Select all atoms:
atmsel = modeller.selection(mdl)
# Generate the restraints:
mdl.restraints.make(atmsel, restraint_type='stereo', spline_on_site=False)
# mdl.restraints.write(file=mpath+mname+'.rsr')
mpdf = atmsel.energy()
# print("before optmimise")
md = modeller.optimizers.molecular_dynamics(output='REPORT')
mol.pmvaction.last = 10000
mol.pmvaction.store = True
# print("optimise")
maxit = self.pd.getLong(self.pd.NUMBERS['mdIterMax'])
temp = self.pd.getLong(self.pd.NUMBERS['mdTemp'])
mol.pmvaction.store = self.pd.getBool(self.pd.CHECKBOXS['store'])
# print((maxit,temp,mol.pmvaction.store))
mol.pmvaction.redraw = self.pd.getBool(self.pd.CHECKBOXS['display'])
md.optimize(atmsel, temperature=float(temp),
max_iterations=int(maxit), actions=mol.pmvaction)
del md
mol.pmvaction.redraw = False
mol.allAtoms.setConformation(0)
return True
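# Note on the two Modeller entry points: modellerOptimize above runs
# conjugate_gradients (pure minimisation) while modellerMD runs
# molecular_dynamics at the requested temperature; both stream frames back
# through mol.pmvaction, with redraw gated by the 'display' checkbox.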
def drawAPBS(self, *args):
self.drawSubDialog(self.apbsgui, 255555710)
|
corredD/ePMV
|
epmvGui.py
|
Python
|
gpl-3.0
| 184,458
|
[
"CRYSTAL",
"Gromacs",
"PyMOL"
] |
f9f8f563d598d1d2dc1248fff09ecdb722ffc0e3cc2f3d440c0eda73eb631b93
|
# Copyright (c) 2014, the GREAT3 executive committee (http://www.great3challenge.info/?q=contacts)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This is a simple driver script for `training_galaxy_props.py` that constructs a PSF object and pixel
scale based on command-line inputs, and then calls `training_galaxy_props.py` with that PSF. For
GREAT3, we ran this script 6 times as follows:
python run_props.py Euclid foo 0.05 real_galaxy_catalog_23.5_real_props_Euclid_0.05.fits
python run_props.py Euclid foo 0.10 real_galaxy_catalog_23.5_real_props_Euclid_0.10.fits
python run_props.py Kolmogorov 0.5 0.2 real_galaxy_catalog_23.5_real_props_Kolm_0.5_0.2.fits
python run_props.py Kolmogorov 0.65 0.2 real_galaxy_catalog_23.5_real_props_Kolm_0.65_0.2.fits
python run_props.py Kolmogorov 0.8 0.2 real_galaxy_catalog_23.5_real_props_Kolm_0.8_0.2.fits
python run_props.py Kolmogorov 0.95 0.2 real_galaxy_catalog_23.5_real_props_Kolm_0.95_0.2.fits
For the two space-based cases (the first two runs), we considered two pixel scales, one of which is
relevant for single-epoch sims (0.05") and the other for multi-epoch sims (0.10"). While the PSF
was constructed to be a simplified Euclid-esque PSF, it does not matter too much whether we use
Euclid or WFIRST-AFTA parameters, since most galaxies pass all the cuts in the space-based sims
anyway.
For the ground-based sims, we consider four values of PSF FWHM (0.5", 0.65", 0.8", and 0.95"), and
when we place cuts on galaxies in the sims, we actually interpolate between those four outputs.
Note that these PSFs are simplified and, in particular, circular, so our use of them to make cuts
means that the selection of galaxies in the simulations does not depend on the direction of the PSF
ellipticity in the sims themselves.
"""
import sys
import galsim
import math
from training_galaxy_props import *
# We need to read the command-line arguments, which are:
# - PSF type: either Kolmogorov or Euclid
# - FWHM in arcsec, only used for Kolmogorov
# - pixel scale
# - output filename
if len(sys.argv) != 5:
raise ValueError("Wrong number of command-line arguments!")
psf_type = sys.argv[1]
allowed_types = ['Kolmogorov', 'Euclid']
if psf_type not in allowed_types:
raise ValueError("PSF type is not allowed!")
# Make the PSF object.
if psf_type == 'Kolmogorov':
psf = galsim.Kolmogorov(fwhm = float(sys.argv[2]))
else:
lambda_m = 800.e-9
primary_diam_m = 1.2
secondary_diam_m = 0.4
jitter_rms = 0.02 # arcsec
lam_over_diam = (lambda_m / primary_diam_m) * 3600. * 180. / math.pi # arcsec
obscuration = secondary_diam_m / primary_diam_m # linear obscuration ratio
euclid_psf = galsim.OpticalPSF(
lam_over_diam=lam_over_diam, obscuration=obscuration, coma1=-0.04, defocus=0.09,
astig2=-0.03, astig1=0.01, oversampling=2.5)
jitter_psf = galsim.Gaussian(sigma=jitter_rms)
psf = galsim.Convolve(euclid_psf, jitter_psf)
# Now call the training galaxy properties script.
training_galaxy_props(psf,
out_filename = sys.argv[4],
pix_scale = float(sys.argv[3]),
size_factor = 0.6,
ps_size = 48)
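# Worked check of the Euclid-branch optics numbers above (illustrative only):
# lam_over_diam = (800e-9 / 1.2) rad = 6.67e-7 rad; multiplying by
# 3600*180/pi ~= 206265 arcsec/rad gives ~0.137 arcsec, and the linear
# obscuration is 0.4/1.2 ~= 0.33, so the 0.02" rms jitter Gaussian is a
# small perturbation on the diffraction-limited core.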
|
barnabytprowe/great3-public
|
inputs/galdata/run_props.py
|
Python
|
bsd-3-clause
| 4,678
|
[
"Galaxy",
"Gaussian"
] |
08b82801f3debf51b0eff6a26bc7627320494e45014e98870f483567fe2bca15
|
import numpy as np
def gaussian_peak(grid, x0, FWHM):
""" Return a normalized gaussian peak with FWHM=width
in the given grid. """
a = np.sqrt(4*np.log(2)/np.pi)/FWHM
b = 4*np.log(2)/FWHM**2
return a*np.exp( - b*(grid-x0)**2 )
def broaden(grid, values, width, weights=None):
""" Broaden given values with Gaussian distributions over
given grid. Width is the FWHM of the peak, and values
can be provided with different weights (default value
is that the weights are 1 for all values). Each peak
integrates to the corresponding weight."""
if weights is None:  # 'is' avoids elementwise ndarray comparison
weights = np.ones_like(values)
ret = np.zeros_like(grid)
for val, weight in zip(values, weights):
ret += weight * gaussian_peak(grid, val, width)
return ret
def make_cumulative_plot(grid, values, width, weights_list, labels=None, colors=None):
""" Make a cumulative plot from the values that are broadened with
gaussian distributions of FWHM=width. The weights_list is an
array of the shape [:,len(values)]. """
import pylab
if labels is None:
labels = ['_nolegend_' for i in range(len(weights_list))]
if colors is None:
colors = np.random.random((len(values), 4))
colors[:,3] = 1
low = np.zeros_like(grid)
for weights, color, label in zip(weights_list, colors, labels):
up = low + broaden(grid, values, width, weights)
## MS: incompatibility issue with matplotlib>=3.1
# x, y = pylab.poly_between(grid, low, up)
# pylab.fill(x, y, facecolor=color, edgecolor='none', label=label)
# Polygon equivalent of poly_between: upper edge forward, lower reversed.
pylab.fill(np.append(grid, grid[::-1]), np.append(up, low[::-1]),
facecolor=color, edgecolor='none', label=label)
low = up
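# --- Illustrative usage (not part of hotbit): broaden three hypothetical
# values and check that the total integral equals the sum of the weights.
if __name__ == '__main__':
    grid = np.linspace(-5.0, 5.0, 2001)
    values = np.array([-1.0, 0.0, 2.5])
    weights = np.array([1.0, 2.0, 0.5])
    spectrum = broaden(grid, values, width=0.3, weights=weights)
    # Each peak integrates to its weight, so this prints ~3.5.
    print(np.trapz(spectrum, grid))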
|
pekkosk/hotbit
|
box/broaden.py
|
Python
|
gpl-2.0
| 1,774
|
[
"Gaussian"
] |
855a2188c4b28b265aa9a8db5e4988fbf0876daf143c67a0bedabcfba347cfc3
|
# Copyright 2019 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Timesketch API client library."""
from __future__ import unicode_literals
import copy
import os
import json
import logging
import pandas
from . import analyzer
from . import aggregation
from . import definitions
from . import error
from . import graph
from . import index as api_index
from . import resource
from . import search
from . import searchtemplate
from . import story
from . import timeline
logger = logging.getLogger('timesketch_api.sketch')
class Sketch(resource.BaseResource):
"""Timesketch sketch object.
A sketch in Timesketch is a collection of one or more timelines. It has
access control and its own namespace for things like labels and comments.
Attributes:
id: The ID of the sketch.
api: An instance of TimesketchApi object.
"""
# Add in necessary fields in data ingested via a different mechanism.
_NECESSARY_DATA_FIELDS = frozenset([
'timestamp', 'datetime', 'message'])
def __init__(self, sketch_id, api, sketch_name=None):
"""Initializes the Sketch object.
Args:
sketch_id: Primary key ID of the sketch.
api: An instance of TimesketchApi object.
sketch_name: Name of the sketch (optional).
"""
self.id = sketch_id
self.api = api
self._archived = None
self._sketch_name = sketch_name
super().__init__(api=api, resource_uri=f'sketches/{self.id}')
@property
def acl(self):
"""Property that returns back a ACL dict."""
data = self.lazyload_data(refresh_cache=True)
objects = data.get('objects')
if not objects:
return {}
data_object = objects[0]
permission_string = data_object.get('all_permissions')
if not permission_string:
return {}
return json.loads(permission_string)
@property
def attributes(self):
"""Property that returns the sketch attributes."""
data = self.lazyload_data(refresh_cache=True)
meta = data.get('meta', {})
return meta.get('attributes', {})
@property
def attributes_table(self):
"""Property that returns the sketch attributes as a data frame."""
data = self.lazyload_data(refresh_cache=True)
meta = data.get('meta', {})
attributes = meta.get('attributes', [])
data_frame = pandas.DataFrame(attributes)
data_frame.columns = ['attribute', 'values', 'ontology']
return data_frame
@property
def description(self):
"""Property that returns sketch description.
Returns:
Sketch description as string.
"""
sketch = self.lazyload_data()
return sketch['objects'][0]['description']
@description.setter
def description(self, description_value):
"""Change the sketch description to a new value."""
if not isinstance(description_value, str):
logger.error('Unable to change the description to a non string value')
return
resource_url = '{0:s}/sketches/{1:d}/'.format(
self.api.api_root, self.id)
data = {
'description': description_value,
}
response = self.api.session.post(resource_url, json=data)
_ = error.check_return_status(response, logger)
# Force the new description to be re-loaded.
_ = self.lazyload_data(refresh_cache=True)
@property
def labels(self):
"""Property that returns the sketch labels."""
data = self.lazyload_data(refresh_cache=True)
objects = data.get('objects', [])
if not objects:
return []
sketch_data = objects[0]
label_string = sketch_data.get('label_string', '')
if label_string:
return json.loads(label_string)
return []
@property
def last_activity(self):
"""Property that returns the last activity.
Returns:
Sketch last activity as a string.
"""
data = self.lazyload_data(refresh_cache=True)
meta = data.get('meta', {})
return meta.get('last_activity', '')
@property
def my_acl(self):
"""Property that returns back the ACL for the current user."""
data = self.lazyload_data(refresh_cache=True)
objects = data.get('objects')
if not objects:
return []
data_object = objects[0]
permission_string = data_object.get('my_permissions')
if not permission_string:
return []
return json.loads(permission_string)
@property
def name(self):
"""Property that returns sketch name.
Returns:
Sketch name as string.
"""
if not self._sketch_name:
sketch = self.lazyload_data()
self._sketch_name = sketch['objects'][0]['name']
return self._sketch_name
@name.setter
def name(self, name_value):
"""Change the name of the sketch to a new value."""
if not isinstance(name_value, str):
logger.error('Unable to change the name to a non string value')
return
resource_url = '{0:s}/sketches/{1:d}/'.format(
self.api.api_root, self.id)
data = {
'name': name_value,
}
response = self.api.session.post(resource_url, json=data)
_ = error.check_return_status(response, logger)
# Force the new name to be re-loaded.
self._sketch_name = ''
_ = self.lazyload_data(refresh_cache=True)
@property
def status(self):
"""Property that returns sketch status.
Returns:
Sketch status as string.
"""
data = self.lazyload_data(refresh_cache=True)
objects = data.get('objects')
if not objects:
return 'Unknown'
if not isinstance(objects, (list, tuple)):
return 'Unknown'
first_object = objects[0]
status_list = first_object.get('status')
if not status_list:
return 'Unknown'
if len(status_list) < 1:
return 'Unknown'
return status_list[0].get('status', 'Unknown')
def add_attribute_list(self, name, values, ontology='text'):
"""Adds or modifies attributes to the sketch.
Args:
name (str): The name of the attribute.
values (list): A list of values (in their correct type according
to the ontology).
ontology (str): The ontology (matches with
/data/ontology.yaml), which defines how the attribute
is interpreted.
Raises:
ValueError: If any of the parameters are of the wrong type.
Returns:
A dict with the results from the operation.
"""
if not isinstance(name, str):
raise ValueError('Name needs to be a string.')
if not isinstance(ontology, str):
raise ValueError('Ontology needs to be a string.')
resource_url = '{0:s}/sketches/{1:d}/attribute/'.format(
self.api.api_root, self.id)
data = {
'name': name,
'values': values,
'ontology': ontology,
}
response = self.api.session.post(resource_url, json=data)
status = error.check_return_status(response, logger)
if not status:
logger.error('Unable to add the attribute to the sketch.')
return error.get_response_json(response, logger)
def add_attribute(self, name, value, ontology='text'):
"""Adds or modifies an attribute to the sketch.
Args:
name (str): The name of the attribute.
value (str): Value of the attribute, stored as a string.
ontology (str): The ontology (matches with
/data/ontology.yaml), which defines
how the attribute is interpreted.
Raises:
ValueError: If any of the parameters are of the wrong type.
Returns:
A dict with the results from the operation.
"""
if not isinstance(name, str):
raise ValueError('Name needs to be a string.')
return self.add_attribute_list(
name=name, values=[value], ontology=ontology)
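# --- Illustrative usage sketch (not part of this module). The server URL,
# credentials and sketch ID are placeholders; assumes a reachable Timesketch
# server. Later usage sketches below take such a connected instance as input.
def _usage_sketch_attributes():
    from timesketch_api_client import client
    ts = client.TimesketchApi(
        'http://127.0.0.1:5000', username='dev', password='dev')
    my_sketch = ts.get_sketch(1)
    my_sketch.add_attribute('ticket_id', '12345', ontology='text')
    my_sketch.add_attribute_list('analysts', ['alice', 'bob'], ontology='text')
    return my_sketch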
def add_sketch_label(self, label):
"""Add a label to the sketch.
Args:
label (str): A string with the label to add to the sketch.
Returns:
bool: A boolean to indicate whether the label was successfully
added to the sketch.
"""
if label in self.labels:
logger.error(
'Label [{0:s}] already applied to sketch.'.format(label))
return False
resource_url = '{0:s}/sketches/{1:d}/'.format(
self.api.api_root, self.id)
data = {
'labels': [label],
'label_action': 'add',
}
response = self.api.session.post(resource_url, json=data)
status = error.check_return_status(response, logger)
if not status:
logger.error(
'Unable to add the label [{0:s}] to the sketch.'.format(label))
return status
def remove_attribute(self, name, ontology):
"""Remove an attribute from the sketch.
Args:
name (str): The name of the attribute.
ontology (str): The ontology (matches with
/data/ontology.yaml), which defines how the attribute
is interpreted.
Raises:
ValueError: If any of the parameters are of the wrong type.
Returns:
Boolean value whether the attribute was successfully
removed or not.
"""
if not isinstance(name, str):
raise ValueError('Name needs to be a string.')
resource_url = '{0:s}/sketches/{1:d}/attribute/'.format(
self.api.api_root, self.id)
data = {
'name': name,
'ontology': ontology,
}
response = self.api.session.delete(resource_url, json=data)
status = error.check_return_status(response, logger)
if not status:
logger.error('Unable to remove the attribute from the sketch.')
return status
def remove_sketch_label(self, label):
"""Remove a label from the sketch.
Args:
label (str): A string with the label to remove from the sketch.
Returns:
bool: A boolean to indicate whether the label was successfully
removed from the sketch.
"""
if label not in self.labels:
logger.error(
'Unable to remove label [{0:s}], not a label '
'attached to this sketch.'.format(label))
return False
resource_url = '{0:s}/sketches/{1:d}/'.format(
self.api.api_root, self.id)
data = {
'labels': [label],
'label_action': 'remove',
}
response = self.api.session.post(resource_url, json=data)
status = error.check_return_status(response, logger)
if not status:
logger.error('Unable to remove the label from the sketch.')
return status
def create_view(
self, name, query_string='', query_dsl='', query_filter=None):
"""Create a view object.
Args:
name (str): the name of the view.
query_string (str): OpenSearch query string. This is optional
yet either a query string or a query DSL is required.
query_dsl (str): OpenSearch query DSL as JSON string. This is
optional yet either a query string or a query DSL is required.
query_filter (dict): Filter for the query as a dict.
Raises:
ValueError: if neither query_string nor query_dsl is provided or
if query_filter is not a dict.
RuntimeError: if a view wasn't created for some reason.
Returns:
A search.Search object that has been saved to the database.
"""
logger.warning(
'View objects will be deprecated shortly, use search.Search '
'and call the search_obj.save() function to save a search.')
if not (query_string or query_dsl):
raise ValueError('You need to supply a query string or a dsl')
if self.is_archived():
raise RuntimeError('Unable to create a view on an archived sketch.')
search_obj = search.Search(sketch=self)
search_obj.from_manual(
query_string=query_string,
query_dsl=query_dsl,
query_filter=query_filter
)
search_obj.name = name
search_obj.save()
return search_obj
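# --- Illustrative sketch of the recommended, non-deprecated path mentioned
# in the warning above: build and save a search.Search directly. Takes a
# connected Sketch instance; the query string is a placeholder.
def _usage_sketch_saved_search(my_sketch):
    search_obj = search.Search(sketch=my_sketch)
    search_obj.from_manual(query_string='data_type:"fs:stat"')
    search_obj.name = 'All filesystem events'
    search_obj.save()
    return search_obj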
def create_story(self, title):
"""Create a story object.
Args:
title: the title of the story.
Raises:
RuntimeError: if a story wasn't created for some reason.
Returns:
A story object (instance of Story) for the newly
created story.
"""
if self.is_archived():
raise RuntimeError(
'Unable to create a story in an archived sketch.')
resource_url = '{0:s}/sketches/{1:d}/stories/'.format(
self.api.api_root, self.id)
data = {
'title': title,
'content': ''
}
response = self.api.session.post(resource_url, json=data)
status = error.check_return_status(response, logger)
if not status:
error.error_message(
response, 'Unable to create a story', error=RuntimeError)
response_json = error.get_response_json(response, logger)
story_dict = response_json.get('objects', [{}])[0]
return story.Story(
story_id=story_dict.get('id', 0),
sketch=self,
api=self.api)
def delete(self):
"""Deletes the sketch."""
if self.is_archived():
raise RuntimeError(
'Unable to delete an archived sketch, first '
'unarchive then delete.')
resource_url = '{0:s}/sketches/{1:d}/'.format(
self.api.api_root, self.id)
response = self.api.session.delete(resource_url)
return error.check_return_status(response, logger)
def add_to_acl(
self, user_list=None, group_list=None,
make_public=False, permissions=None):
"""Add users or groups to the sketch ACL.
Args:
user_list: optional list of users to add to the ACL
of the sketch. Each user is a string.
group_list: optional list of groups to add to the ACL
of the sketch. Each group is a string.
make_public: Optional boolean indicating the sketch should be
marked as public.
permissions: optional list of permissions (read, write, delete).
If not set, the default permissions (read, write) are applied.
Returns:
A boolean indicating whether the ACL change was successful.
"""
if not user_list and not group_list and not make_public:
return False
resource_url = '{0:s}/sketches/{1:d}/collaborators/'.format(
self.api.api_root, self.id)
data = {}
if group_list:
group_list_corrected = [str(x).strip() for x in group_list]
data['groups'] = group_list_corrected
if user_list:
user_list_corrected = [str(x).strip() for x in user_list]
data['users'] = user_list_corrected
if make_public:
data['public'] = 'true'
if permissions:
allowed_permissions = set(['read', 'write', 'delete'])
use_permissions = list(
allowed_permissions.intersection(set(permissions)))
if set(use_permissions) != set(permissions):
logger.warning('Some permissions are invalid: {0:s}'.format(
', '.join(list(
set(permissions).difference(set(use_permissions))))))
if not use_permissions:
logger.error('No permissions left to add.')
return False
data['permissions'] = json.dumps(use_permissions)
if not data:
return True
response = self.api.session.post(resource_url, json=data)
# Refresh the sketch data to reflect ACL changes.
_ = self.lazyload_data(refresh_cache=True)
return error.check_return_status(response, logger)
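# --- Illustrative usage (hypothetical user and group names): grant read and
# write access on a connected Sketch instance.
def _usage_sketch_acl(my_sketch):
    return my_sketch.add_to_acl(
        user_list=['analyst@example.com'],
        group_list=['ir-team'],
        permissions=['read', 'write'])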
def list_aggregation_groups(self):
"""List all saved aggregation groups for this sketch.
Returns:
List of aggregation groups (instances of AggregationGroup objects)
"""
if self.is_archived():
raise RuntimeError(
'Unable to list aggregation groups on an archived sketch.')
groups = []
data = self.api.fetch_resource_data(
f'sketches/{self.id}/aggregation/group/')
for group_dict in data.get('objects', []):
if not group_dict.get('id'):
continue
group = aggregation.AggregationGroup(sketch=self)
group.from_saved(group_dict.get('id'))
groups.append(group)
return groups
def list_aggregations(self, include_labels=None, exclude_labels=None):
"""List all saved aggregations for this sketch.
Args:
include_labels (list): list of strings with labels. If defined
then only return aggregations that have the label in the list.
exclude_labels (list): list of strings with labels. If defined
then only return aggregations that don't have a label in the
list. include_labels will be processed first in case both are
defined.
Returns:
List of aggregations (instances of Aggregation objects)
"""
if self.is_archived():
raise RuntimeError(
'Unable to list aggregations on an archived sketch.')
aggregations = []
data = self.api.fetch_resource_data(f'sketches/{self.id}/aggregation/')
objects = data.get('objects')
if not objects:
return aggregations
if not isinstance(objects, (list, tuple)):
return aggregations
object_list = objects[0]
if not isinstance(object_list, (list, tuple)):
return aggregations
for aggregation_dict in object_list:
agg_id = aggregation_dict.get('id')
group_id = aggregation_dict.get('aggregationgroup_id')
if group_id:
continue
label_string = aggregation_dict.get('label_string', '')
if label_string:
labels = json.loads(label_string)
else:
labels = []
if include_labels:
if not any(x in include_labels for x in labels):
continue
if exclude_labels:
if any(x in exclude_labels for x in labels):
continue
aggregation_obj = aggregation.Aggregation(sketch=self)
aggregation_obj.from_saved(aggregation_id=agg_id)
aggregations.append(aggregation_obj)
return aggregations
def list_graphs(self):
"""Returns a list of stored graphs."""
if self.is_archived():
raise RuntimeError(
'Unable to list graphs on an archived sketch.')
resource_uri = (
f'{self.api.api_root}/sketches/{self.id}/graphs/')
response = self.api.session.get(resource_uri)
response_json = error.get_response_json(response, logger)
objects = response_json.get('objects')
if not objects:
logger.warning('No graphs discovered.')
return []
return_list = []
graph_list = objects[0]
for graph_dict in graph_list:
graph_obj = graph.Graph(sketch=self)
graph_obj.from_saved(graph_dict.get('id'))
return_list.append(graph_obj)
return return_list
def get_analyzer_status(self, as_sessions=False):
"""Returns a list of started analyzers and their status.
Args:
as_sessions (bool): optional, if set to True then a list of
AnalyzerResult objects will be returned. Defaults to
returning a list of dicts.
Returns:
If "as_sessions" is set then a list of AnalyzerResult gets
returned, otherwise a list of dict objects that contains
status information of each analyzer run. The dict contains
information about what timeline it ran against, the
results and current status of the analyzer run.
"""
if self.is_archived():
raise RuntimeError(
'Unable to list analyzer status on an archived sketch.')
stats_list = []
sessions = []
for timeline_obj in self.list_timelines():
resource_uri = (
'{0:s}/sketches/{1:d}/timelines/{2:d}/analysis').format(
self.api.api_root, self.id, timeline_obj.id)
response = self.api.session.get(resource_uri)
response_json = error.get_response_json(response, logger)
objects = response_json.get('objects')
if not objects:
continue
for result in objects[0]:
session_id = result.get('analysissession_id')
stat = {
'index': timeline_obj.index,
'timeline_id': timeline_obj.id,
'session_id': session_id,
'analyzer': result.get('analyzer_name', 'N/A'),
'results': result.get('result', 'N/A'),
'status': 'N/A',
}
if as_sessions and session_id:
sessions.append(analyzer.AnalyzerResult(
timeline_id=timeline_obj.id, session_id=session_id,
sketch_id=self.id, api=self.api))
status = result.get('status', [])
if len(status) == 1:
stat['status'] = status[0].get('status', 'N/A')
stats_list.append(stat)
if as_sessions:
return sessions
return stats_list
def get_aggregation(self, aggregation_id):
"""Return a stored aggregation.
Args:
aggregation_id: id of the stored aggregation.
Returns:
An aggregation object, if stored (instance of Aggregation),
otherwise None object.
"""
if self.is_archived():
raise RuntimeError(
'Unable to get aggregations on an archived sketch.')
for aggregation_obj in self.list_aggregations():
if aggregation_obj.id == aggregation_id:
return aggregation_obj
return None
def get_aggregation_group(self, group_id):
"""Return a stored aggregation group.
Args:
group_id: id of the stored aggregation group.
Returns:
An aggregation group object (instance of AggregationGroup)
if stored, otherwise None object.
"""
if self.is_archived():
raise RuntimeError(
'Unable to get aggregation groups on an archived sketch.')
for group_obj in self.list_aggregation_groups():
if group_obj.id == group_id:
return group_obj
return None
def get_story(self, story_id=None, story_title=None):
"""Returns a story object that is stored in the sketch.
Args:
story_id: an integer indicating the ID of the story to
be fetched. Defaults to None.
story_title: a string with the title of the story. Optional
and defaults to None.
Returns:
A story object (instance of Story) if one is found. Returns
a None if neither story_id or story_title is defined or if
the view does not exist. If a story title is defined and
not a story id, the first story that is found with the same
title will be returned.
"""
if self.is_archived():
raise RuntimeError(
'Unable to get stories on an archived sketch.')
if story_id is None and story_title is None:
return None
for story_obj in self.list_stories():
if story_id and story_id == story_obj.id:
return story_obj
if story_title and story_title.lower() == story_obj.title.lower():
return story_obj
return None
def get_view(self, view_id=None, view_name=None):
"""Returns a saved search object that is stored in the sketch.
Args:
view_id: an integer indicating the ID of the saved search to
be fetched. Defaults to None.
view_name: a string with the name of the saved search. Optional
and defaults to None.
Returns:
A search object (instance of search.Search) if one is found.
Returns a None if neither view_id or view_name is defined or if
the search does not exist.
"""
logger.warning(
'This function is about to be deprecated, use '
'get_saved_search() instead.')
return self.get_saved_search(search_id=view_id, search_name=view_name)
def get_saved_search(self, search_id=None, search_name=None):
"""Returns a saved search object that is stored in the sketch.
Args:
search_id: an integer indicating the ID of the saved search to
be fetched. Defaults to None.
search_name: a string with the name of the saved search. Optional
and defaults to None.
Returns:
A search object (instance of search.Search) if one is found.
Returns a None if neither search_id or search_name is defined or if
the search does not exist.
"""
if self.is_archived():
raise RuntimeError(
'Unable to get saved searches on an archived sketch.')
if search_id is None and search_name is None:
return None
for search_obj in self.list_saved_searches():
if search_id and search_id == search_obj.id:
return search_obj
if search_name and search_name.lower() == search_obj.name.lower():
return search_obj
return None
def get_timeline(self, timeline_id=None, timeline_name=None):
"""Returns a timeline object that is stored in the sketch.
Args:
timeline_id: an integer indicating the ID of the timeline to
be fetched. Defaults to None.
timeline_name: a string with the name of the timeline. Optional
and defaults to None.
Returns:
A timeline object (instance of Timeline) if one is found. Returns
a None if neither timeline_id or timeline_name is defined or if
the timeline does not exist.
"""
if self.is_archived():
raise RuntimeError(
'Unable to get timelines on an archived sketch.')
if timeline_id is None and timeline_name is None:
return None
for timeline_ in self.list_timelines():
if timeline_id and timeline_id == timeline_.id:
return timeline_
if timeline_name:
if timeline_name.lower() == timeline_.name.lower():
return timeline_
return None
def list_stories(self):
"""Get a list of all stories that are attached to the sketch.
Returns:
List of stories (instances of Story objects)
"""
if self.is_archived():
raise RuntimeError(
'Unable to list stories on an archived sketch.')
story_list = []
resource_url = '{0:s}/sketches/{1:d}/stories/'.format(
self.api.api_root, self.id)
response = self.api.session.get(resource_url)
response_json = error.get_response_json(response, logger)
story_objects = response_json.get('objects')
if not story_objects:
return story_list
if len(story_objects) != 1:
return story_list
stories = story_objects[0]
for story_dict in stories:
story_list.append(story.Story(
story_id=story_dict.get('id', -1),
sketch=self,
api=self.api))
return story_list
def list_views(self):
"""List all saved views for this sketch.
Returns:
List of search object (instance of search.Search).
"""
logger.warning(
'This function will soon be deprecated, use list_saved_searches() '
'instead.')
return self.list_saved_searches()
def list_saved_searches(self):
"""List all saved searches for this sketch.
Returns:
List of search object (instance of search.Search).
"""
if self.is_archived():
raise RuntimeError(
'Unable to list saved searches on an archived sketch.')
data = self.lazyload_data()
searches = []
meta = data.get('meta', {})
for saved_search in meta.get('views', []):
search_obj = search.Search(sketch=self)
try:
search_obj.from_saved(saved_search.get('id'))
searches.append(search_obj)
except ValueError:
logger.error(
'Unable to load a saved search with ID: {0:d}'.format(
saved_search.get('id', 0)), exc_info=True)
return searches
def list_search_templates(self):
"""Get a list of all search templates that are available.
Returns:
List of searchtemplate.SearchTemplate object instances.
"""
response = self.api.fetch_resource_data('searchtemplate/')
objects = response.get('objects', [])
if not objects:
return []
template_dicts = objects[0]
template_list = []
for template_dict in template_dicts:
template_obj = searchtemplate.SearchTemplate(api=self.api)
template_obj.from_saved(template_dict.get('id'), sketch_id=self.id)
template_list.append(template_obj)
return template_list
def list_timelines(self):
"""List all timelines for this sketch.
Returns:
List of timelines (instances of Timeline objects)
"""
if self.is_archived():
raise RuntimeError(
'Unable to list timelines on an archived sketch.')
timelines = []
data = self.lazyload_data()
objects = data.get('objects')
if not objects:
return timelines
for timeline_dict in objects[0].get('timelines', []):
timeline_obj = timeline.Timeline(
timeline_id=timeline_dict['id'],
sketch_id=self.id,
api=self.api,
name=timeline_dict['name'],
searchindex=timeline_dict['searchindex']['index_name'])
timelines.append(timeline_obj)
return timelines
# pylint: disable=unused-argument
def upload(self, timeline_name, file_path, es_index=None):
"""Deprecated function to upload data, does nothing.
Args:
timeline_name: Name of the resulting timeline.
file_path: Path to the file to be uploaded.
es_index: Index name for the ES database
Raises:
RuntimeError: If this function is used, since it has been
deprecated in favor of the importer client.
"""
message = (
'This function has been deprecated, use the CLI tool: '
'timesketch_importer: https://github.com/google/timesketch/blob/'
'master/docs/UploadData.md#using-the-importer-clie-tool or the '
'importer library: https://github.com/google/timesketch/blob/'
'master/docs/UploadDataViaAPI.md')
logger.error(message)
raise RuntimeError(message)
# pylint: disable=unused-argument
def add_timeline(self, searchindex):
"""Deprecated function to add timeline to sketch.
Args:
searchindex: SearchIndex object instance.
Raises:
RuntimeError: If this function is called.
"""
message = (
'This function has been deprecated, since adding already existing '
'indices to a sketch is no longer supported.')
logger.error(message)
raise RuntimeError(message)
def explore(self,
query_string=None,
query_dsl=None,
query_filter=None,
view=None,
return_fields=None,
as_pandas=False,
max_entries=None,
file_name='',
as_object=False):
"""Explore the sketch.
Args:
query_string (str): OpenSearch query string.
query_dsl (str): OpenSearch query DSL as JSON string.
query_filter (dict): Filter for the query as a dict.
view: View object instance (optional).
return_fields (str): A comma separated string with a list of fields
that should be included in the response. Optional and defaults
to None.
as_pandas (bool): Optional bool that determines if the results
should be returned back as a dictionary or a Pandas DataFrame.
max_entries (int): Optional integer denoting a best-effort limit
on the number of events returned. Events are read in batches
of 10k, so the response may contain somewhat more events
than this number.
file_name (str): Optional filename, if provided the results of
the query will be exported to a ZIP file instead of being
returned back as a dict or a pandas DataFrame. The ZIP file
will contain a METADATA file and a CSV with the results from
the query.
as_object (bool): Optional bool that determines whether the
function will return a search object back instead of raw
results.
Returns:
Dictionary with query results, a pandas DataFrame if as_pandas
is set to True or a search.Search object if as_object is set
to True. If file_name is provided then no value will be
returned.
Raises:
ValueError: if unable to query for the results.
RuntimeError: if the query is missing needed values, or if the
sketch is archived.
"""
logger.warning(
'Using this function is discouraged, please consider using '
'the search.Search object instead, which is more flexible.')
if not (query_string or query_filter or query_dsl or view):
raise RuntimeError('You need to supply a query or view')
if self.is_archived():
raise RuntimeError('Unable to query an archived sketch.')
search_obj = search.Search(sketch=self)
if view:
logger.warning(
'View objects will be deprecated soon, use search.Search '
'objects instead.')
search_obj.from_saved(view.id)
else:
search_obj.from_manual(
query_string=query_string,
query_dsl=query_dsl,
query_filter=query_filter,
return_fields=return_fields,
max_entries=max_entries
)
if as_object:
return search_obj
if file_name:
return search_obj.to_file(file_name)
if as_pandas:
return search_obj.to_pandas()
return search_obj.to_dict()
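# --- Illustrative usage (placeholder query): fetch matching events as a
# pandas DataFrame via the (discouraged) explore() wrapper.
def _usage_sketch_explore(my_sketch):
    data_frame = my_sketch.explore(
        query_string='message:"login"',
        return_fields='datetime,message,timestamp_desc',
        as_pandas=True)
    return data_frame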
def list_available_analyzers(self):
"""Returns a list of available analyzers."""
resource_url = '{0:s}/sketches/{1:d}/analyzer/'.format(
self.api.api_root, self.id)
response = self.api.session.get(resource_url)
return error.get_response_json(response, logger)
def run_analyzer(
self, analyzer_name, analyzer_kwargs=None, timeline_id=None,
timeline_name=None):
"""Run an analyzer on a timeline.
Args:
analyzer_name: the name of the analyzer class to run against the
timeline.
analyzer_kwargs: optional dict with parameters for the analyzer.
This is optional and just for those analyzers that can accept
further parameters.
timeline_id: the ID of the timeline. This is optional and only
required if timeline_name is not set.
timeline_name: the name of the timeline in the timesketch UI. This
is optional and only required if timeline_id is not set. If
there are more than a single timeline with the same name a
timeline_id is required.
Raises:
error.UnableToRunAnalyzer: if not able to run the analyzer.
Returns:
If the analyzer runs successfully return back an AnalyzerResult
object.
"""
# TODO: Deprecate this function.
logger.warning(
'This function is about to be deprecated, please use the '
'`.run_analyzer()` function of a timeline object instead. '
'This function does not support all functionality of the newer '
'implementation in the timeline object.')
if self.is_archived():
raise error.UnableToRunAnalyzer(
'Unable to run an analyzer on an archived sketch.')
if not timeline_id and not timeline_name:
raise error.UnableToRunAnalyzer(
'Unable to run analyzer, need to define either '
'timeline ID or name')
if timeline_name:
sketch = self.lazyload_data(refresh_cache=True)
timelines = []
for timeline_dict in sketch['objects'][0]['timelines']:
name = timeline_dict.get('name', '')
if timeline_name.lower() == name.lower():
timelines.append(timeline_dict.get('id'))
if not timelines:
raise error.UnableToRunAnalyzer(
'No timelines with the name: {0:s} were found'.format(
timeline_name))
if len(timelines) != 1:
raise error.UnableToRunAnalyzer(
'There are {0:d} timelines defined in the sketch with '
'this name, please use a unique name or a '
'timeline ID'.format(len(timelines)))
timeline_id = timelines[0]
if not timeline_id:
raise error.UnableToRunAnalyzer(
'Unable to run an analyzer, not able to find a timeline.')
timeline_obj = timeline.Timeline(
timeline_id=timeline_id,
sketch_id=self.id,
api=self.api)
return timeline_obj.run_analyzer(
analyzer_name=analyzer_name, analyzer_kwargs=analyzer_kwargs)
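# --- Illustrative sketch of the newer path named in the warning above: run
# an analyzer from the timeline object. The timeline name is a placeholder;
# 'domain' is assumed here to be one of the stock Timesketch analyzers.
def _usage_sketch_analyzer(my_sketch):
    timeline_obj = my_sketch.get_timeline(timeline_name='my-timeline')
    return timeline_obj.run_analyzer(analyzer_name='domain')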
def remove_acl(
self, user_list=None, group_list=None, remove_public=False,
permissions=None):
"""Remove users or groups to the sketch ACL.
Args:
user_list: optional list of users to remove from the ACL
of the sketch. Each user is a string.
group_list: optional list of groups to remove from the ACL
of the sketch. Each group is a string.
remove_public: Optional boolean indicating the sketch should be
no longer marked as public.
permissions: optional list of permissions (read, write, delete).
If not set, the default permissions (read, write) are applied.
Returns:
A boolean indicating whether the ACL change was successful.
"""
if not user_list and not group_list:
return True
resource_url = '{0:s}/sketches/{1:d}/collaborators/'.format(
self.api.api_root, self.id)
data = {}
if group_list:
group_list_corrected = [str(x).strip() for x in group_list]
data['remove_groups'] = group_list_corrected
if user_list:
user_list_corrected = [str(x).strip() for x in user_list]
data['remove_users'] = user_list_corrected
if remove_public:
data['public'] = 'false'
if permissions:
allowed_permissions = set(['read', 'write', 'delete'])
permissions = list(
allowed_permissions.intersection(set(permissions)))
data['permissions'] = json.dumps(permissions)
if not data:
return True
response = self.api.session.post(resource_url, json=data)
# Refresh the sketch data to reflect ACL changes.
_ = self.lazyload_data(refresh_cache=True)
return error.check_return_status(response, logger)
def aggregate(self, aggregate_dsl):
"""Run an aggregation request on the sketch.
Args:
aggregate_dsl: OpenSearch aggregation query DSL string.
Returns:
An aggregation object (instance of Aggregation).
Raises:
ValueError: if unable to query for the results.
"""
if self.is_archived():
raise ValueError(
'Unable to run an aggregation on an archived sketch.')
if not aggregate_dsl:
raise RuntimeError(
'You need to supply an aggregation query DSL string.')
aggregation_obj = aggregation.Aggregation(sketch=self)
aggregation_obj.from_manual(aggregate_dsl)
return aggregation_obj
def list_available_aggregators(self):
"""Return a list of all available aggregators in the sketch."""
data = self.lazyload_data()
meta = data.get('meta', {})
always_supported = [{
'parameter': 'index',
'notes': (
'List of indices or timeline IDs to limit the aggregation'),
'type': 'text-input',
}]
entries = []
for name, options in iter(meta.get('aggregators', {}).items()):
for field in options.get('form_fields', []):
entry = {
'aggregator_name': name,
'parameter': field.get('name'),
'notes': field.get('label')
}
if field.get('type') == 'ts-dynamic-form-select-input':
entry['value'] = '|'.join(field.get('options', []))
entry['type'] = 'selection'
else:
_, _, entry['type'] = field.get('type').partition(
'ts-dynamic-form-')
entries.append(entry)
for entry_dict in always_supported:
entry = copy.copy(entry_dict)
entry['aggregator_name'] = name
entries.append(entry)
return pandas.DataFrame(entries)
def run_aggregator(
self, aggregator_name, aggregator_parameters):
"""Run an aggregator class.
Args:
aggregator_name: Name of the aggregator to run.
aggregator_parameters: A dict with key/value pairs of parameters
the aggregator needs to run.
Returns:
An aggregation object (instance of Aggregator).
"""
if self.is_archived():
raise RuntimeError(
'Unable to run an aggregator on an archived sketch.')
aggregation_obj = aggregation.Aggregation(sketch=self)
aggregation_obj.from_aggregator_run(
aggregator_name=aggregator_name,
aggregator_parameters=aggregator_parameters
)
return aggregation_obj
def store_aggregation(
self, name, description, aggregator_name, aggregator_parameters,
chart_type=''):
"""Store an aggregation in the sketch.
Args:
name: a name that will be associated with the aggregation.
description: description of the aggregation, visible in the UI.
aggregator_name: name of the aggregator class.
aggregator_parameters: parameters of the aggregator.
chart_type: string representing the chart type.
Raises:
RuntimeError: if the client is unable to store the aggregation.
Returns:
A stored aggregation object or None if not stored.
"""
if self.is_archived():
raise RuntimeError(
'Unable to store an aggregator on an archived sketch.')
# TODO: Deprecate this function.
logger.warning(
'This function is about to be deprecated, please use the '
'`.save()` function of an aggregation object instead')
aggregator_obj = self.run_aggregator(
aggregator_name, aggregator_parameters)
aggregator_obj.name = name
aggregator_obj.description = description
if chart_type:
aggregator_obj.chart_type = chart_type
if aggregator_obj.save():
_ = self.lazyload_data(refresh_cache=True)
return aggregator_obj
return None
def comment_event(self, event_id, index, comment_text):
"""Adds a comment to a single event.
Args:
event_id: id of the event
index: The OpenSearch index name
comment_text: text to add as a comment
Returns:
a json data of the query.
"""
if self.is_archived():
raise RuntimeError(
'Unable to comment on an event in an archived sketch.')
form_data = {
'annotation': comment_text,
'annotation_type': 'comment',
'events': {
'_id': event_id,
'_index': index,
'_type': 'generic_event'}
}
resource_url = '{0:s}/sketches/{1:d}/event/annotate/'.format(
self.api.api_root, self.id)
response = self.api.session.post(resource_url, json=form_data)
return error.get_response_json(response, logger)
def label_events(self, events, label_name):
"""Labels one or more events with label_name.
Args:
events: Array of JSON objects representing events.
label_name: String to label the event with.
Returns:
Dictionary with query results.
"""
if self.is_archived():
raise RuntimeError(
'Unable to label events in an archived sketch.')
form_data = {
'annotation': label_name,
'annotation_type': 'label',
'events': events
}
resource_url = '{0:s}/sketches/{1:d}/event/annotate/'.format(
self.api.api_root, self.id)
response = self.api.session.post(resource_url, json=form_data)
return error.get_response_json(response, logger)
def tag_events(self, events, tags, verbose=False):
"""Tags one or more events with a list of tags.
Args:
events: Array of JSON objects representing events.
tags: List of tags (str) to add to the events.
verbose: Bool that determines whether extra information
is added to the meta dict that gets returned.
Raises:
ValueError: if tags is not a list of strings.
RuntimeError: if the sketch is archived.
Returns:
A dict with the results from the tagging operation.
"""
if self.is_archived():
raise RuntimeError(
'Unable to tag events in an archived sketch.')
if not isinstance(tags, list):
raise ValueError('Tags need to be a list.')
if not all(isinstance(x, str) for x in tags):
raise ValueError('Tags need to be a list of strings.')
form_data = {
'tag_string': json.dumps(tags),
'events': events,
'verbose': verbose,
}
resource_url = '{0:s}/sketches/{1:d}/event/tagging/'.format(
self.api.api_root, self.id)
response = self.api.session.post(resource_url, json=form_data)
status = error.check_return_status(response, logger)
if not status:
return {
'number_of_events': len(events),
'number_of_events_with_tag': 0,
'success': status
}
response_json = error.get_response_json(response, logger)
meta = response_json.get('meta', {})
meta['total_number_of_events_sent_by_client'] = len(events)
return meta
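# --- Illustrative usage: tag one event. The '_id' and '_index' values are
# placeholders; in practice they come from a previous search result.
def _usage_sketch_tagging(my_sketch):
    events = [{
        '_id': 'k8P1x3cB',
        '_index': 'abc123',
        '_type': 'generic_event'}]
    return my_sketch.tag_events(events, ['suspicious', 'phishing'])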
def search_by_label(
self, label_name, return_fields=None, max_entries=None,
as_pandas=False):
"""Searches for all events containing a given label.
Args:
label_name: A string representing the label to search for.
return_fields (str): A comma separated string with a list of fields
that should be included in the response. Optional and defaults
to None.
max_entries (int): Optional integer denoting a best-effort limit
on the number of events returned. Events are read in batches
of 10k, so the response may contain somewhat more events
than this number.
as_pandas: Optional bool that determines if the results should
be returned back as a dictionary or a Pandas DataFrame.
Returns:
A dictionary with query results.
"""
if self.is_archived():
raise RuntimeError(
'Unable to search for labels in an archived sketch.')
logger.warning(
'This function will be deprecated soon. Use the search.Search '
'object instead and add a search.LabelChip to search for labels.')
query = {
'nested': {
'path': 'timesketch_label',
'query': {
'bool': {
'must': [
{
'term': {
'timesketch_label.name': label_name
}
},
{
'term': {
'timesketch_label.sketch_id': self.id
}
}
]
}
}
}
}
return self.explore(
query_dsl=json.dumps({'query': query}), return_fields=return_fields,
max_entries=max_entries, as_pandas=as_pandas)
def add_scenario(self, scenario_name):
"""Adds a investigative scenario to the sketch.
Args:
scenario_name (str): Name of the scenario to add.
Raises:
RuntimeError: If sketch is archived.
Returns:
Dictionary with scenario.
"""
if self.is_archived():
raise RuntimeError(
'Unable to add a scenario to an archived sketch')
form_data = {
'scenario_name': scenario_name
}
resource_url = '{0:s}/sketches/{1:d}/scenarios/'.format(
self.api.api_root, self.id)
response = self.api.session.post(resource_url, json=form_data)
return error.get_response_json(response, logger)
def add_event(
self, message, date, timestamp_desc, attributes=None,
tags=None):
"""Adds an event to the sketch specific timeline.
Args:
message: A string that will be used as the message string.
date: A string with the timestamp of the message. This should be
in a human readable format, eg: "2020-09-03T22:52:21".
timestamp_desc: Description of the timestamp.
attributes: A dict of extra attributes to add to the event.
tags: A list of strings to include as tags.
Raises:
ValueError: If tags is not a list of strings or attributes
is not a dict.
Returns:
Dictionary with query results.
"""
if self.is_archived():
raise RuntimeError(
'Unable to add an event to an archived sketch.')
if tags is None:
tags = []
if not isinstance(tags, list):
raise ValueError('Tags needs to be a list.')
if any(not isinstance(tag, str) for tag in tags):
raise ValueError('Tags needs to be a list of strings.')
if attributes is None:
attributes = {}
if not isinstance(attributes, dict):
raise ValueError('Attributes needs to be a dict.')
form_data = {
'date_string': date,
'timestamp_desc': timestamp_desc,
'message': message,
'tag': tags
}
duplicate_attributes = [key for key in attributes if key in form_data]
if duplicate_attributes:
duplicates = ', '.join(duplicate_attributes)
raise ValueError(
f'Following attributes cannot overwrite values '
f'already set: {duplicates}')
form_data['attributes'] = attributes
resource_url = '{0:s}/sketches/{1:d}/event/create/'.format(
self.api.api_root, self.id)
response = self.api.session.post(resource_url, json=form_data)
return error.get_response_json(response, logger)
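# --- Illustrative usage: add a single manual event. The message, date and
# attribute values are placeholders; note that attribute keys must not
# collide with the reserved form fields checked above.
def _usage_sketch_add_event(my_sketch):
    return my_sketch.add_event(
        message='Suspicious login detected',
        date='2020-09-03T22:52:21',
        timestamp_desc='Event Logged',
        attributes={'username': 'alice'},
        tags=['manual-entry'])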
def is_archived(self):
"""Return a boolean indicating whether the sketch has been archived."""
if self._archived is not None:
return self._archived
resource_url = '{0:s}/sketches/{1:d}/archive/'.format(
self.api.api_root, self.id)
response = self.api.session.get(resource_url)
data = error.get_response_json(response, logger)
meta = data.get('meta', {})
self._archived = meta.get('is_archived', False)
return self._archived
def archive(self):
"""Archive a sketch and return a boolean whether it was successful."""
if self.is_archived():
logger.error('Sketch already archived.')
return False
resource_url = '{0:s}/sketches/{1:d}/archive/'.format(
self.api.api_root, self.id)
data = {
'action': 'archive'
}
response = self.api.session.post(resource_url, json=data)
return_status = error.check_return_status(response, logger)
self._archived = return_status
return return_status
def unarchive(self):
"""Unarchives a sketch and return boolean whether it was successful."""
if not self.is_archived():
logger.error('Sketch wasn\'t archived.')
return False
resource_url = '{0:s}/sketches/{1:d}/archive/'.format(
self.api.api_root, self.id)
data = {
'action': 'unarchive'
}
response = self.api.session.post(resource_url, json=data)
return_status = error.check_return_status(response, logger)
# return_status = True means unarchive is successful or that
# the archive status is False.
self._archived = not return_status
return return_status
def export(self, file_path):
"""Exports the content of the sketch to a ZIP file.
Args:
file_path (str): a file path where the ZIP file will be saved.
Raises:
RuntimeError: if sketch cannot be exported.
"""
directory = os.path.dirname(file_path)
if not os.path.isdir(directory):
raise RuntimeError(
'The directory needs to exist, please create: '
'{0:s} first'.format(directory))
if not file_path.lower().endswith('.zip'):
logger.warning('File does not end with a .zip, adding it.')
file_path = '{0:s}.zip'.format(file_path)
if os.path.isfile(file_path):
raise RuntimeError('File [{0:s}] already exists.'.format(file_path))
form_data = {
'action': 'export'
}
resource_url = '{0:s}/sketches/{1:d}/archive/'.format(
self.api.api_root, self.id)
response = self.api.session.post(resource_url, json=form_data)
status = error.check_return_status(response, logger)
if not status:
error.error_message(
response, message='Failed exporting the sketch',
error=RuntimeError)
with open(file_path, 'wb') as fw:
fw.write(response.content)
def generate_timeline_from_es_index(
self, es_index_name, name, index_name='', description='',
provider='Manually added to OpenSearch',
context='Added via API client', data_label='OpenSearch',
status='ready'):
"""Creates and returns a Timeline from OpenSearch data.
This function can be used to import data into a sketch that was
ingested via a different mechanism, such as ELK.
The function creates the necessary structures (SearchIndex and a
Timeline) for Timesketch to be able to properly support it.
Args:
es_index_name: name of the index in OpenSearch.
name: string with the name of the timeline.
index_name: optional string for the SearchIndex name, defaults
to the same as the es_index_name.
description: optional string with a description of the timeline.
provider: optional string with the provider name for the data
source of the imported data. Defaults to "Manually added
to OpenSearch".
context: optional string with the context for the data upload,
defaults to "Added via API client".
data_label: optional string with the data label of the OpenSearch
data, defaults to "OpenSearch".
status: Optional string, if provided will be used as a status
for the searchindex, valid options are: "ready", "fail",
"processing", "timeout". Defaults to "ready".
Raises:
ValueError: If there are errors in the generation of the
timeline.
Returns:
Instance of a Timeline object.
"""
if not es_index_name:
raise ValueError('ES index needs to be provided.')
if not name:
raise ValueError('Timeline name needs to be provided.')
# Step 1: Make sure the index doesn't exist already.
for index_obj in self.api.list_searchindices():
if index_obj is None:
continue
if index_obj.index_name == es_index_name:
raise ValueError(
'Unable to add the ES index, since it already exists.')
# Step 2: Create a SearchIndex.
resource_url = f'{self.api.api_root}/searchindices/'
form_data = {
'searchindex_name': index_name or es_index_name,
'es_index_name': es_index_name,
}
response = self.api.session.post(resource_url, json=form_data)
if response.status_code not in definitions.HTTP_STATUS_CODE_20X:
error.error_message(
response, message='Error creating searchindex',
error=ValueError)
response_dict = error.get_response_json(response, logger)
objects = response_dict.get('objects')
if not objects:
raise ValueError(
'Unable to create a SearchIndex, try again or file an '
'issue on GitHub.')
searchindex_id = objects[0].get('id')
# Step 3: Verify mappings to make sure data conforms.
index_obj = api_index.SearchIndex(searchindex_id, api=self.api)
index_fields = set(index_obj.fields)
if not self._NECESSARY_DATA_FIELDS.issubset(index_fields):
index_obj.status = 'fail'
raise ValueError(
'Unable to ingest data since it is missing required '
'fields: {0:s} [ingested data contains these fields: '
'{1:s}]'.format(
', '.join(self._NECESSARY_DATA_FIELDS.difference(
index_fields)), '|'.join(index_fields)))
if status:
index_obj.status = status
# Step 4: Create the Timeline.
resource_url = f'{self.api.api_root}/sketches/{self.id}/timelines/'
form_data = {'timeline': searchindex_id, 'timeline_name': name}
response = self.api.session.post(resource_url, json=form_data)
if response.status_code not in definitions.HTTP_STATUS_CODE_20X:
error.error_message(
response, message='Error creating a timeline object',
error=ValueError)
response_dict = error.get_response_json(response, logger)
objects = response_dict.get('objects')
if not objects:
raise ValueError(
'Unable to create a Timeline, try again or file an '
'issue on GitHub.')
timeline_dict = objects[0]
timeline_obj = timeline.Timeline(
timeline_id=timeline_dict['id'],
sketch_id=self.id,
api=self.api,
name=timeline_dict['name'],
searchindex=timeline_dict['searchindex']['index_name'])
# Step 5: Add the timeline ID into the dataset.
resource_url = (
f'{self.api.api_root}/sketches/{self.id}/event/add_timeline_id/')
form_data = {
'searchindex_id': searchindex_id,
'timeline_id': timeline_dict['id'],
}
response = self.api.session.post(resource_url, json=form_data)
if response.status_code not in definitions.HTTP_STATUS_CODE_20X:
error.error_message(
response, message='Unable to add timeline identifier to data',
error=ValueError)
# Step 6: Add a DataSource object.
resource_url = f'{self.api.api_root}/sketches/{self.id}/datasource/'
form_data = {
'timeline_id': timeline_dict['id'],
'provider': provider,
'context': context,
'data_label': data_label,
}
response = self.api.session.post(resource_url, json=form_data)
if response.status_code not in definitions.HTTP_STATUS_CODE_20X:
error.error_message(
response, message='Error creating a datasource object',
error=ValueError)
_ = error.get_response_json(response, logger)
return timeline_obj
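# --- Illustrative usage (hypothetical index name): surface an index that
# was ingested outside Timesketch, e.g. by an ELK pipeline. The index must
# already carry the required 'timestamp', 'datetime' and 'message' fields.
def _usage_sketch_es_import(my_sketch):
    return my_sketch.generate_timeline_from_es_index(
        es_index_name='logstash-2021.01.01',
        name='ELK import',
        provider='logstash',
        context='Imported via API client')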
def run_data_finder(
self, start_date, end_date, rule_names, timelines=None):
"""Runs the data finder .
Args:
start_date (str): Start date as a ISO 8601 formatted string.
end_date (str): End date as a ISO 8601 formatted string.
rule_names (list): A list of strings with rule names to run
against the dataset in the sketch.
timelines (list): Optional list of timeline identifiers or
timeline names to limit the data search to certain
timelines within the sketch. Defaults to search all
timelines.
Returns:
A list of dictionaries, one dict for each rule that was run,
alongside its results.
"""
if timelines is None:
timeline_ids = [t.id for t in self.list_timelines()]
else:
timeline_ids = []
valid_ids = set()
name_to_id = {}
for _timeline in self.list_timelines():
valid_ids.add(_timeline.id)
name_to_id[_timeline.name.lower()] = _timeline.id
for _timeline in timelines:
if isinstance(_timeline, int) and _timeline in valid_ids:
timeline_ids.append(_timeline)
continue
if not isinstance(_timeline, str):
logger.error(
'Unable to use timeline, it needs to either be '
'a string or an integer.')
continue
if _timeline.lower() not in name_to_id:
logger.error(
'Unable to add timeline, name not found in active '
'timelines in the sketch.')
continue
timeline_ids.append(name_to_id[_timeline.lower()])
data = {
'start_date': start_date,
'end_date': end_date,
'timeline_ids': timeline_ids,
'rule_names': rule_names
}
resource_url = f'{self.api.api_root}/sketches/{self.id}/data/find/'
response = self.api.session.post(resource_url, json=data)
if response.status_code not in definitions.HTTP_STATUS_CODE_20X:
error.error_message(
response, message='Unable to find data', error=ValueError)
response_dict = error.get_response_json(response, logger)
return response_dict.get('objects', [])
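# --- Illustrative usage: the dates, rule name and timeline name are
# placeholders; rule names must exist in the server-side data finder
# configuration.
def _usage_sketch_data_finder(my_sketch):
    return my_sketch.run_data_finder(
        start_date='2021-01-01T00:00:00',
        end_date='2021-01-31T23:59:59',
        rule_names=['browser_search'],
        timelines=['my-timeline'])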
|
google/timesketch
|
api_client/python/timesketch_api_client/sketch.py
|
Python
|
apache-2.0
| 66,968
|
[
"Elk"
] |
88017a294dc0557a72e285682c073905f1fedb79be7d5d2cd160726b6cf7eaf7
|
# coding: utf-8
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import random
import re
import socket
import sys
import time
import math
from ..compat import (
compat_cookiejar,
compat_cookies,
compat_etree_fromstring,
compat_getpass,
compat_http_client,
compat_os_name,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
)
from ..downloader.f4m import remove_encrypted_media
from ..utils import (
NO_DEFAULT,
age_restricted,
base_url,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
determine_protocol,
error_to_compat_str,
ExtractorError,
extract_attributes,
fix_xml_ampersands,
float_or_none,
GeoRestrictedError,
GeoUtils,
int_or_none,
js_to_json,
mimetype2ext,
orderedSet,
parse_codecs,
parse_duration,
parse_iso8601,
parse_m3u8_attributes,
RegexNotFoundError,
sanitized_Request,
sanitize_filename,
unescapeHTML,
unified_strdate,
unified_timestamp,
update_Request,
update_url_query,
urljoin,
url_basename,
xpath_element,
xpath_text,
xpath_with_ns,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url Mandatory. The URL of the video file
* manifest_url
The URL of the manifest file in case of
fragmented media (DASH, hls, hds)
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height,
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", "m3u8_native" or "http_dash_segments".
* fragment_base_url
Base URL for fragments. Each fragment's path
value (if present) will be relative to
this URL.
* fragments A list of fragments of a fragmented media.
Each fragment entry must contain either a URL
or a path. If a URL is present it should be
considered by a client. Otherwise both path and
fragment_base_url must be present. Here is
the list of all potential fields:
* "url" - fragment's URL
* "path" - fragment's path relative to
fragment_base_url
* "duration" (optional, int or float)
* "filesize" (optional, int)
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language Language code, e.g. "de" or "en-US".
* language_preference Is this in the language mentioned in
the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height"},
deprecated)
* "filesize" (optional, int)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The creator of the video.
release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{tag: subformats}. "tag" is usually a language code, and
"subformats" is a list sorted from lower to higher
preference, each element is a dictionary with the "ext"
entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
average_rating: Average rating given by users; the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage; if given to youtube-dl it
should allow getting the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
chapters: A list of dictionaries, with the following entries:
* "start_time" - The start time of the chapter in seconds
* "end_time" - The end time of the chapter in seconds
* "title" (optional, string)
The following fields should only be used when the video belongs to some logical
chapter or section:
chapter: Name or title of the chapter the video belongs to.
chapter_number: Number of the chapter the video belongs to, as an integer.
chapter_id: Id of the chapter the video belongs to, as a unicode string.
The following fields should only be used when the video is an episode of some
series, programme or podcast:
series: Title of the series or programme the video episode belongs to.
season: Title of the season the video episode belongs to.
season_number: Number of the season the video episode belongs to, as an integer.
season_id: Id of the season the video episode belongs to, as a unicode string.
episode: Title of the video episode. Unlike mandatory video title field,
this field should denote the exact title of the video episode
without any kind of decoration.
episode_number: Number of the video episode within a season, as an integer.
episode_id: Id of the video episode, as a unicode string.
The following fields should only be used when the media is a track or a part of
a music album:
track: Title of the track.
track_number: Number of the track within an album or a disc, as an integer.
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
as a unicode string.
artist: Artist(s) of the track.
genre: Genre(s) of the track.
album: Title of the album the track belongs to.
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
album_artist: List of all artists who appeared on the album (e.g.
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
and compilations).
disc_number: Number of the disc or other physical medium the track belongs to,
as an integer.
release_year: Year (YYYY) when the album was released.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "title", "description" and "id" attributes
with the same semantics as videos (see above).
_type "multi_video" indicates that there are multiple videos that
form a single show, for example multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
_GEO_BYPASS attribute may be set to False in order to disable
geo restriction bypass mechanisms for a particular extractor.
Though it won't disable explicit geo restriction bypass based on
country code provided with geo_bypass_country. (experimental)
_GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
countries for this extractor. One of these countries will be used by
geo restriction bypass mechanism right away in order to bypass
geo restriction, of course, if the mechanism is not disabled. (experimental)
NB: both these geo attributes are experimental and may change in future
or be completely removed.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_x_forwarded_for_ip = None
_GEO_BYPASS = True
_GEO_COUNTRIES = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self._x_forwarded_for_ip = None
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return m.group('id')
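# Sketch of how a subclass typically wires suitable() and _match_id()
# together (hypothetical extractor, for illustration only):
#
#     class ExampleIE(InfoExtractor):
#         _VALID_URL = r'https?://(?:www\.)?example\.com/video/(?P<id>\d+)'
#
#     ExampleIE.suitable('https://example.com/video/123')   # True
#     ExampleIE._match_id('https://example.com/video/123')  # '123'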
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
self._initialize_geo_bypass(self._GEO_COUNTRIES)
if not self._ready:
self._real_initialize()
self._ready = True
def _initialize_geo_bypass(self, countries):
"""
Initialize geo restriction bypass mechanism.
This method is used to initialize geo bypass mechanism based on faking
X-Forwarded-For HTTP header. A random country from the provided country list
is selected and a random IP belonging to this country is generated. This
IP will be passed as X-Forwarded-For HTTP header in all subsequent
HTTP requests.
This method will be used for initial geo bypass mechanism initialization
during the instance initialization with _GEO_COUNTRIES.
You may also manually call it from extractor's code if geo countries
information is not available beforehand (e.g. obtained during
extraction) or for some other reason.
"""
if not self._x_forwarded_for_ip:
country_code = self._downloader.params.get('geo_bypass_country', None)
# If there is no explicit country for geo bypass specified and
# the extractor is known to be geo restricted let's fake IP
# as X-Forwarded-For right away.
if (not country_code and
self._GEO_BYPASS and
self._downloader.params.get('geo_bypass', True) and
countries):
country_code = random.choice(countries)
if country_code:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
if self._downloader.params.get('verbose', False):
self._downloader.to_stdout(
'[debug] Using fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country_code.upper()))
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
try:
for _ in range(2):
try:
self.initialize()
ie_result = self._real_extract(url)
if self._x_forwarded_for_ip:
ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
return ie_result
except GeoRestrictedError as e:
if self.__maybe_fake_ip_and_retry(e.countries):
continue
raise
except ExtractorError:
raise
except compat_http_client.IncompleteRead as e:
raise ExtractorError('A network error has occurred.', cause=e, expected=True)
except (KeyError, StopIteration) as e:
raise ExtractorError('An extractor error has occurred.', cause=e)
def __maybe_fake_ip_and_retry(self, countries):
if (not self._downloader.params.get('geo_bypass_country', None) and
self._GEO_BYPASS and
self._downloader.params.get('geo_bypass', True) and
not self._x_forwarded_for_ip and
countries):
country_code = random.choice(countries)
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
if self._x_forwarded_for_ip:
self.report_warning(
'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country_code.upper()))
return True
return False
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return compat_str(cls.__name__[:-2])
@property
def IE_NAME(self):
return compat_str(type(self).__name__[:-2])
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
""" Returns the response handle """
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
if isinstance(url_or_request, compat_urllib_request.Request):
url_or_request = update_Request(
url_or_request, data=data, headers=headers, query=query)
else:
if query:
url_or_request = update_url_query(url_or_request, query)
if data is not None or headers:
url_or_request = sanitized_Request(url_or_request, data, headers)
try:
return self._downloader.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}):
""" Returns a tuple (page content as string, URL handle) """
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
# Some sites check X-Forwarded-For HTTP header in order to figure out
# the origin of the client behind proxy. This allows bypassing geo
# restriction by faking this header's value to an IP that belongs to some
# geo unrestricted country. We will do so once we encounter any
# geo restriction error.
if self._x_forwarded_for_ip:
if 'X-Forwarded-For' not in headers:
headers['X-Forwarded-For'] = self._x_forwarded_for_ip
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
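# Hedged examples of the guessing order: an explicit charset in the
# Content-Type header wins, then a <meta charset> in the first KiB,
# then a UTF-16 BOM, otherwise UTF-8 (inputs are illustrative):
#
#     _guess_encoding_from_content(
#         'text/html; charset=iso-8859-1', b'<html>')       # 'iso-8859-1'
#     _guess_encoding_from_content(
#         'text/html', b'<meta charset="utf-8">')           # 'utf-8'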
def __check_blocked(self, content):
first_block = content[:512]
if ('<title>Access to this site is blocked</title>' in content and
'Websense' in first_block):
msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
'Websense information URL', default=None)
if blocked_iframe:
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
if '<title>The URL you requested has been blocked</title>' in first_block:
msg = (
'Access to this webpage has been blocked by Indian censorship. '
'Use a VPN or proxy server (with --proxy) to route around it.')
block_msg = self._html_search_regex(
r'</h1><p>(.*?)</p>',
content, 'block message', default=None)
if block_msg:
msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
raise ExtractorError(msg, expected=True)
if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content and
'blocklist.rkn.gov.ru' in content):
raise ExtractorError(
'Access to this webpage has been blocked by decision of the Russian government. '
'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
expected=True)
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
if not encoding:
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
if self._downloader.params.get('dump_intermediate_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
self.to_screen('Dumping request to ' + url)
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
basen = '%s_%s' % (video_id, url)
if len(basen) > 240:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if compat_os_name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
try:
content = webpage_bytes.decode(encoding, 'replace')
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
self.__check_blocked(content)
return content
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None, data=None, headers={}, query={}):
""" Returns the data of the page as a string """
success = False
try_count = 0
while success is False:
try:
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding, data=data, headers=headers, query=query)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
self._sleep(timeout, video_id)
if res is False:
return res
else:
content, _ = res
return content
def _download_xml(self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}):
"""Return the xml as an xml.etree.ElementTree.Element"""
xml_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query)
if xml_string is False:
return xml_string
if transform_source:
xml_string = transform_source(xml_string)
return compat_etree_fromstring(xml_string.encode('utf-8'))
def _download_json(self, url_or_request, video_id,
note='Downloading JSON metadata',
errnote='Unable to download JSON metadata',
transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={}):
json_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query)
if (not fatal) and json_string is False:
return None
return self._parse_json(
json_string, video_id, transform_source=transform_source, fatal=fatal)
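# Typical call pattern inside an extractor (sketch; the API URL is
# hypothetical):
#
#     data = self._download_json(
#         'https://example.com/api/video/%s' % video_id, video_id,
#         note='Downloading video metadata')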
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
@staticmethod
def raise_login_required(msg='This video is only available for registered users'):
raise ExtractorError(
'%s. Use --username and --password or --netrc to provide account credentials.' % msg,
expected=True)
@staticmethod
def raise_geo_restricted(msg='This video is not available from your location due to geo restriction', countries=None):
raise GeoRestrictedError(msg, countries=countries)
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
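# Sketch: deferring extraction of an embedded video to another
# extractor (the embed ID variable is hypothetical):
#
#     return self.url_result(
#         'https://www.youtube.com/watch?v=%s' % youtube_id,
#         ie='Youtube', video_id=youtube_id)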
def playlist_from_matches(self, matches, video_id, video_title, getter=None, ie=None):
urlrs = orderedSet(
self.url_result(self._proto_relative_url(getter(m) if getter else m), ie)
for m in matches)
return self.playlist_result(
urlrs, playlist_id=video_id, playlist_title=video_title)
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single pattern or a
list of patterns, returning the first matching group.
In case of failure, return a default value, emit a warning, or raise a
RegexNotFoundError, depending on fatal, specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
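# Sketch of a typical call (pattern and page contents hypothetical):
#
#     title = self._search_regex(
#         r'<h1 class="title">([^<]+)</h1>', webpage, 'title',
#         default=None)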
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
def _get_netrc_login_info(self, netrc_machine=None):
username = None
password = None
netrc_machine = netrc_machine or self._NETRC_MACHINE
if self._downloader.params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(netrc_machine)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError(
'No authenticators for %s' % netrc_machine)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning(
'parsing .netrc: %s' % error_to_compat_str(err))
return username, password
def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
"""
Get the login info as (username, password)
First look for the manually specified credentials using username_option
and password_option as keys in the params dictionary. If no such
credentials are available, look in the netrc file using the
netrc_machine or _NETRC_MACHINE value.
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get(username_option) is not None:
username = downloader_params[username_option]
password = downloader_params[password_option]
else:
username, password = self._get_netrc_login_info(netrc_machine)
return username, password
def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verification;
currently this just uses the command line option.
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor') is not None:
return downloader_params['twofactor']
return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
% {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
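# The two templates above cover both attribute orders, e.g.
# (illustrative markup):
#
#     <meta property="og:title" content="Some title">
#     <meta content="Some title" property="og:title">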
@staticmethod
def _meta_regex(prop):
return r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
if not isinstance(prop, (list, tuple)):
prop = [prop]
if name is None:
name = 'OpenGraph %s' % prop[0]
og_regexes = []
for p in prop:
og_regexes.extend(self._og_regexes(p))
escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if not isinstance(name, (list, tuple)):
name = [name]
if display_name is None:
display_name = name[0]
return self._html_search_regex(
[self._meta_regex(n) for n in name],
html, display_name, fatal=fatal, group='content', **kwargs)
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower())
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta('isFamilyFriendly', html)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower())
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
json_ld = self._search_regex(
r'(?s)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
html, 'JSON-LD', group='json_ld', **kwargs)
default = kwargs.get('default', NO_DEFAULT)
if not json_ld:
return default if default is not NO_DEFAULT else {}
# JSON-LD may be malformed and thus `fatal` should be respected.
# At the same time `default` may be passed that assumes `fatal=False`
# for _search_regex. Let's simulate the same behavior here as well.
fatal = kwargs.get('fatal', True) if default == NO_DEFAULT else False
return self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
if isinstance(json_ld, compat_str):
json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
if not json_ld:
return {}
info = {}
if not isinstance(json_ld, (list, tuple, dict)):
return info
if isinstance(json_ld, dict):
json_ld = [json_ld]
def extract_video_object(e):
assert e['@type'] == 'VideoObject'
info.update({
'url': e.get('contentUrl'),
'title': unescapeHTML(e.get('name')),
'description': unescapeHTML(e.get('description')),
'thumbnail': e.get('thumbnailUrl') or e.get('thumbnailURL'),
'duration': parse_duration(e.get('duration')),
'timestamp': unified_timestamp(e.get('uploadDate')),
'filesize': float_or_none(e.get('contentSize')),
'tbr': int_or_none(e.get('bitrate')),
'width': int_or_none(e.get('width')),
'height': int_or_none(e.get('height')),
'view_count': int_or_none(e.get('interactionCount')),
})
for e in json_ld:
if e.get('@context') == 'http://schema.org':
item_type = e.get('@type')
if expected_type is not None and expected_type != item_type:
return info
if item_type == 'TVEpisode':
info.update({
'episode': unescapeHTML(e.get('name')),
'episode_number': int_or_none(e.get('episodeNumber')),
'description': unescapeHTML(e.get('description')),
})
part_of_season = e.get('partOfSeason')
if isinstance(part_of_season, dict) and part_of_season.get('@type') == 'TVSeason':
info['season_number'] = int_or_none(part_of_season.get('seasonNumber'))
part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
if isinstance(part_of_series, dict) and part_of_series.get('@type') == 'TVSeries':
info['series'] = unescapeHTML(part_of_series.get('name'))
elif item_type == 'Article':
info.update({
'timestamp': parse_iso8601(e.get('datePublished')),
'title': unescapeHTML(e.get('headline')),
'description': unescapeHTML(e.get('articleBody')),
})
elif item_type == 'VideoObject':
extract_video_object(e)
elif item_type == 'WebPage':
video = e.get('video')
if isinstance(video, dict) and video.get('@type') == 'VideoObject':
extract_video_object(video)
break
return dict((k, v) for k, v in info.items() if v is not None)
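# Sketch of a VideoObject payload this method understands (values
# hypothetical):
#
#     {
#         "@context": "http://schema.org",
#         "@type": "VideoObject",
#         "name": "Example video",
#         "contentUrl": "https://example.com/video.mp4",
#         "uploadDate": "2017-01-01T00:00:00Z",
#         "duration": "PT1M30S"
#     }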
@staticmethod
def _hidden_inputs(html):
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
hidden_inputs = {}
for input in re.findall(r'(?i)(<input[^>]+>)', html):
attrs = extract_attributes(input)
if not attrs:
continue
if attrs.get('type') not in ('hidden', 'submit'):
continue
name = attrs.get('name') or attrs.get('id')
value = attrs.get('value')
if name and value is not None:
hidden_inputs[name] = value
return hidden_inputs
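# Sketch: harvesting hidden login-form fields before POSTing
# credentials (form markup and field names hypothetical):
#
#     login_form = self._hidden_inputs(webpage)
#     login_form.update({'username': username, 'password': password})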
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
def _sort_formats(self, formats, field_preference=None):
if not formats:
raise ExtractorError('No video formats found')
for f in formats:
# Automatically determine tbr when missing based on abr and vbr (improves
# formats sorting in some cases)
if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
f['tbr'] = f['abr'] + f['vbr']
def _formats_key(f):
# TODO remove the following workaround
from ..utils import determine_ext
if not f.get('ext') and 'url' in f:
f['ext'] = determine_ext(f['url'])
if isinstance(field_preference, (list, tuple)):
return tuple(
f.get(field)
if f.get(field) is not None
else ('' if field == 'format_id' else -1)
for field in field_preference)
preference = f.get('preference')
if preference is None:
preference = 0
if f.get('ext') in ['f4f', 'f4m']: # Not yet supported
preference -= 0.5
protocol = f.get('protocol') or determine_protocol(f)
proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1)
if f.get('vcodec') == 'none': # audio only
preference -= 50
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
ext_preference = 0
try:
audio_ext_preference = ORDER.index(f['ext'])
except ValueError:
audio_ext_preference = -1
else:
if f.get('acodec') == 'none': # video only
preference -= 40
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['flv', 'mp4', 'webm']
else:
ORDER = ['webm', 'flv', 'mp4']
try:
ext_preference = ORDER.index(f['ext'])
except ValueError:
ext_preference = -1
audio_ext_preference = 0
return (
preference,
f.get('language_preference') if f.get('language_preference') is not None else -1,
f.get('quality') if f.get('quality') is not None else -1,
f.get('tbr') if f.get('tbr') is not None else -1,
f.get('filesize') if f.get('filesize') is not None else -1,
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('height') if f.get('height') is not None else -1,
f.get('width') if f.get('width') is not None else -1,
proto_preference,
ext_preference,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id') if f.get('format_id') is not None else '',
)
formats.sort(key=_formats_key)
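# Sketch (hypothetical formats): the key tuples sort ascending, so the
# best format ends up last, matching the "ordered from worst to best"
# contract:
#
#     formats = [
#         {'url': 'https://example.com/hi.mp4', 'tbr': 4000, 'height': 1080},
#         {'url': 'https://example.com/lo.mp4', 'tbr': 2000, 'height': 720},
#     ]
#     self._sort_formats(formats)
#     # formats[-1] is now the 4000 kbit/s, 1080p entry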
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
unique_formats.append(f)
formats[:] = unique_formats
def _is_valid_url(self, url, video_id, item='video', headers={}):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs always valid
if not (url.startswith('http://') or url.startswith('https://')):
return True
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
return True
except ExtractorError as e:
if isinstance(e.cause, compat_urllib_error.URLError):
self.to_screen(
'%s: %s URL is invalid, skipping' % (video_id, item))
return False
raise
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest',
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
transform_source=transform_source,
fatal=fatal)
if manifest is False:
return []
return self._parse_f4m_formats(
manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None):
# currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
if akamai_pv is not None and ';' in akamai_pv.text:
playerVerificationChallenge = akamai_pv.text.split(';')[0]
if playerVerificationChallenge.strip() != '':
return []
formats = []
manifest_version = '1.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
if not media_nodes:
manifest_version = '2.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
# Remove unsupported DRM protected media from final formats
# rendition (see https://github.com/rg3/youtube-dl/issues/8573).
media_nodes = remove_encrypted_media(media_nodes)
if not media_nodes:
return formats
base_url = xpath_text(
manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'],
'base URL', default=None)
if base_url:
base_url = base_url.strip()
bootstrap_info = xpath_element(
manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
'bootstrap info', default=None)
vcodec = None
mime_type = xpath_text(
manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
'MIME type', default=None)
if mime_type and mime_type.startswith('audio/'):
vcodec = 'none'
for i, media_el in enumerate(media_nodes):
tbr = int_or_none(media_el.attrib.get('bitrate'))
width = int_or_none(media_el.attrib.get('width'))
height = int_or_none(media_el.attrib.get('height'))
format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
# If <bootstrapInfo> is present, the specified f4m is a
# stream-level manifest, and only set-level manifests may refer to
# external resources. See section 11.4 and section 4 of F4M spec
if bootstrap_info is None:
media_url = None
# @href is introduced in 2.0, see section 11.6 of F4M spec
if manifest_version == '2.0':
media_url = media_el.attrib.get('href')
if media_url is None:
media_url = media_el.attrib.get('url')
if not media_url:
continue
manifest_url = (
media_url if media_url.startswith('http://') or media_url.startswith('https://')
else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
# If media_url is itself a f4m manifest do the recursive extraction
# since bitrates in parent manifest (this one) and media_url manifest
# may differ leading to inability to resolve the format by requested
# bitrate in f4m downloader
ext = determine_ext(manifest_url)
if ext == 'f4m':
f4m_formats = self._extract_f4m_formats(
manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal)
# Sometimes stream-level manifest contains single media entry that
# does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
# At the same time parent's media entry in set-level manifest may
# contain it. We will copy it from parent in such cases.
if len(f4m_formats) == 1:
f = f4m_formats[0]
f.update({
'tbr': f.get('tbr') or tbr,
'width': f.get('width') or width,
'height': f.get('height') or height,
'format_id': f.get('format_id') if not tbr else format_id,
'vcodec': vcodec,
})
formats.extend(f4m_formats)
continue
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', preference=preference,
m3u8_id=m3u8_id, fatal=fatal))
continue
formats.append({
'format_id': format_id,
'url': manifest_url,
'manifest_url': manifest_url,
'ext': 'flv' if bootstrap_info is not None else None,
'tbr': tbr,
'width': width,
'height': height,
'vcodec': vcodec,
'preference': preference,
})
return formats
def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
return {
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 100 if preference else -100,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, note=None, errnote=None,
fatal=True, live=False):
res = self._download_webpage_handle(
m3u8_url, video_id,
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal)
if res is False:
return []
m3u8_doc, urlh = res
m3u8_url = urlh.geturl()
return self._parse_m3u8_formats(
m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol,
preference=preference, m3u8_id=m3u8_id, live=live)
def _parse_m3u8_formats(self, m3u8_doc, m3u8_url, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, live=False):
if '#EXT-X-FAXS-CM:' in m3u8_doc: # Adobe Flash Access
return []
formats = []
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
# References:
# 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21
# 2. https://github.com/rg3/youtube-dl/issues/12211
# We should try extracting formats only from master playlists [1, 4.3.4],
# i.e. playlists that describe available qualities. On the other hand
# media playlists [1, 4.3.3] should be returned as is since they contain
# just the media without qualities renditions.
# Fortunately, master playlist can be easily distinguished from media
# playlist based on particular tags availability. As of [1, 4.3.3, 4.3.4]
# master playlist tags MUST NOT appear in a media playlist and vice versa.
# As of [1, 4.3.3.1] #EXT-X-TARGETDURATION tag is REQUIRED for every
# media playlist and MUST NOT appear in master playlist thus we can
# clearly detect media playlist with this criterion.
if '#EXT-X-TARGETDURATION' in m3u8_doc: # media playlist, return as is
return [{
'url': m3u8_url,
'format_id': m3u8_id,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}]
groups = {}
last_stream_inf = {}
def extract_media(x_media_line):
media = parse_m3u8_attributes(x_media_line)
# As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED
media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME')
if not (media_type and group_id and name):
return
groups.setdefault(group_id, []).append(media)
if media_type not in ('VIDEO', 'AUDIO'):
return
media_url = media.get('URI')
if media_url:
format_id = []
for v in (group_id, name):
if v:
format_id.append(v)
f = {
'format_id': '-'.join(format_id),
'url': format_url(media_url),
'manifest_url': m3u8_url,
'language': media.get('LANGUAGE'),
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}
if media_type == 'AUDIO':
f['vcodec'] = 'none'
formats.append(f)
def build_stream_name():
# Although the specification does not mention a NAME attribute for
# the EXT-X-STREAM-INF tag, it may still sometimes be present (see [1]
# or the vidio test in TestInfoExtractor.test_parse_m3u8_formats)
# 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
stream_name = last_stream_inf.get('NAME')
if stream_name:
return stream_name
# If there is no NAME in EXT-X-STREAM-INF it will be obtained
# from corresponding rendition group
stream_group_id = last_stream_inf.get('VIDEO')
if not stream_group_id:
return
stream_group = groups.get(stream_group_id)
if not stream_group:
return stream_group_id
rendition = stream_group[0]
return rendition.get('NAME') or stream_group_id
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-STREAM-INF:'):
last_stream_inf = parse_m3u8_attributes(line)
elif line.startswith('#EXT-X-MEDIA:'):
extract_media(line)
elif line.startswith('#') or not line.strip():
continue
else:
tbr = float_or_none(
last_stream_inf.get('AVERAGE-BANDWIDTH') or
last_stream_inf.get('BANDWIDTH'), scale=1000)
format_id = []
if m3u8_id:
format_id.append(m3u8_id)
stream_name = build_stream_name()
# Bandwidth of live streams may differ over time thus making
# format_id unpredictable. So it's better to keep provided
# format_id intact.
if not live:
format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
manifest_url = format_url(line.strip())
f = {
'format_id': '-'.join(format_id),
'url': manifest_url,
'manifest_url': m3u8_url,
'tbr': tbr,
'ext': ext,
'fps': float_or_none(last_stream_inf.get('FRAME-RATE')),
'protocol': entry_protocol,
'preference': preference,
}
resolution = last_stream_inf.get('RESOLUTION')
if resolution:
mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
if mobj:
f['width'] = int(mobj.group('width'))
f['height'] = int(mobj.group('height'))
# Unified Streaming Platform
mobj = re.search(
r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
if mobj:
abr, vbr = mobj.groups()
abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
f.update({
'vbr': vbr,
'abr': abr,
})
codecs = parse_codecs(last_stream_inf.get('CODECS'))
f.update(codecs)
audio_group_id = last_stream_inf.get('AUDIO')
# As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
# references a rendition group MUST have a CODECS attribute.
# However, this is not always respected, for example, [2]
# contains an EXT-X-STREAM-INF tag which references an AUDIO
# rendition group but does not have CODECS; despite
# referencing an audio group, it represents
# a complete (with audio and video) format. So, for such cases
# we will ignore references to rendition groups and treat them
# as complete formats.
if audio_group_id and codecs and f.get('vcodec') != 'none':
audio_group = groups.get(audio_group_id)
if audio_group and audio_group[0].get('URI'):
# TODO: update acodec for audio only formats with
# the same GROUP-ID
f['acodec'] = 'none'
formats.append(f)
last_stream_inf = {}
return formats
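# Sketch of a tiny master playlist this parser handles (contents
# illustrative):
#
#     #EXTM3U
#     #EXT-X-STREAM-INF:BANDWIDTH=1280000,RESOLUTION=1280x720
#     https://example.com/720p.m3u8
#     #EXT-X-STREAM-INF:BANDWIDTH=2560000,RESOLUTION=1920x1080
#     https://example.com/1080p.m3u8
#
# which yields two formats with tbr 1280 and 2560 and the parsed
# width/height.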
@staticmethod
def _xpath_ns(path, namespace=None):
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
if smil is False:
assert not fatal
return []
namespace = self._parse_smil_namespace(smil)
return self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
if smil is False:
return {}
return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
return self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
namespace = self._parse_smil_namespace(smil)
formats = self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
video_id = os.path.splitext(url_basename(smil_url))[0]
title = None
description = None
upload_date = None
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
name = meta.attrib.get('name')
content = meta.attrib.get('content')
if not name or not content:
continue
if not title and name == 'title':
title = content
elif not description and name in ('description', 'abstract'):
description = content
elif not upload_date and name == 'date':
upload_date = unified_strdate(content)
thumbnails = [{
'id': image.get('type'),
'url': image.get('src'),
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
return {
'id': video_id,
'title': title or video_id,
'description': description,
'upload_date': upload_date,
'thumbnails': thumbnails,
'formats': formats,
'subtitles': subtitles,
}
def _parse_smil_namespace(self, smil):
return self._search_regex(
r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
base = smil_url
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
b = meta.get('base') or meta.get('httpBase')
if b:
base = b
break
formats = []
rtmp_count = 0
http_count = 0
m3u8_count = 0
srcs = []
media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
for medium in media:
src = medium.get('src')
if not src or src in srcs:
continue
srcs.append(src)
bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
width = int_or_none(medium.get('width'))
height = int_or_none(medium.get('height'))
proto = medium.get('proto')
ext = medium.get('ext')
src_ext = determine_ext(src)
streamer = medium.get('streamer') or base
if proto == 'rtmp' or streamer.startswith('rtmp'):
rtmp_count += 1
formats.append({
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
if transform_rtmp_url:
streamer, src = transform_rtmp_url(streamer, src)
formats[-1].update({
'url': streamer,
'play_path': src,
})
continue
src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
src_url = src_url.strip()
if proto == 'm3u8' or src_ext == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
if len(m3u8_formats) == 1:
m3u8_count += 1
m3u8_formats[0].update({
'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
'tbr': bitrate,
'width': width,
'height': height,
})
formats.extend(m3u8_formats)
continue
if src_ext == 'f4m':
f4m_url = src_url
if not f4m_params:
f4m_params = {
'hdcore': '3.2.0',
'plugin': 'flowplayer-3.2.0.1',
}
f4m_url += '&' if '?' in f4m_url else '?'
f4m_url += compat_urllib_parse_urlencode(f4m_params)
formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
continue
if src_url.startswith('http') and self._is_valid_url(src, video_id):
http_count += 1
formats.append({
'url': src_url,
'ext': ext or src_ext or 'flv',
'format_id': 'http-%d' % (bitrate or http_count),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
continue
return formats
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
urls = []
subtitles = {}
for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
src = textstream.get('src')
if not src or src in urls:
continue
urls.append(src)
ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
subtitles.setdefault(lang, []).append({
'url': src,
'ext': ext,
})
return subtitles
def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
xspf = self._download_xml(
playlist_url, playlist_id, 'Downloading xspf playlist',
'Unable to download xspf manifest', fatal=fatal)
if xspf is False:
return []
return self._parse_xspf(xspf, playlist_id)
def _parse_xspf(self, playlist, playlist_id):
NS_MAP = {
'xspf': 'http://xspf.org/ns/0/',
's1': 'http://static.streamone.nl/player/ns/0',
}
entries = []
for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
title = xpath_text(
track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
description = xpath_text(
track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
thumbnail = xpath_text(
track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
duration = float_or_none(
xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
formats = [{
'url': location.text,
'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
} for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
self._sort_formats(formats)
entries.append({
'id': playlist_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
})
return entries
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}):
res = self._download_webpage_handle(
mpd_url, video_id,
note=note or 'Downloading MPD manifest',
errnote=errnote or 'Failed to download MPD manifest',
fatal=fatal)
if res is False:
return []
mpd, urlh = res
mpd_base_url = base_url(urlh.geturl())
return self._parse_mpd_formats(
compat_etree_fromstring(mpd.encode('utf-8')), mpd_id, mpd_base_url,
formats_dict=formats_dict, mpd_url=mpd_url)
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None):
"""
Parse formats from MPD manifest.
References:
1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
"""
if mpd_doc.get('type') == 'dynamic':
return []
namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
def _add_ns(path):
return self._xpath_ns(path, namespace)
def is_drm_protected(element):
return element.find(_add_ns('ContentProtection')) is not None
def extract_multisegment_info(element, ms_parent_info):
ms_info = ms_parent_info.copy()
# As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
# common attributes and elements. We only extract the ones that are
# relevant for us.
def extract_common(source):
segment_timeline = source.find(_add_ns('SegmentTimeline'))
if segment_timeline is not None:
s_e = segment_timeline.findall(_add_ns('S'))
if s_e:
ms_info['total_number'] = 0
ms_info['s'] = []
for s in s_e:
r = int(s.get('r', 0))
ms_info['total_number'] += 1 + r
ms_info['s'].append({
't': int(s.get('t', 0)),
# @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
'd': int(s.attrib['d']),
'r': r,
})
start_number = source.get('startNumber')
if start_number:
ms_info['start_number'] = int(start_number)
timescale = source.get('timescale')
if timescale:
ms_info['timescale'] = int(timescale)
segment_duration = source.get('duration')
if segment_duration:
ms_info['segment_duration'] = int(segment_duration)
def extract_Initialization(source):
initialization = source.find(_add_ns('Initialization'))
if initialization is not None:
ms_info['initialization_url'] = initialization.attrib['sourceURL']
segment_list = element.find(_add_ns('SegmentList'))
if segment_list is not None:
extract_common(segment_list)
extract_Initialization(segment_list)
segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
if segment_urls_e:
ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
else:
segment_template = element.find(_add_ns('SegmentTemplate'))
if segment_template is not None:
extract_common(segment_template)
media = segment_template.get('media')
if media:
ms_info['media'] = media
initialization = segment_template.get('initialization')
if initialization:
ms_info['initialization'] = initialization
else:
extract_Initialization(segment_template)
return ms_info
mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
formats = []
for period in mpd_doc.findall(_add_ns('Period')):
period_duration = parse_duration(period.get('duration')) or mpd_duration
period_ms_info = extract_multisegment_info(period, {
'start_number': 1,
'timescale': 1,
})
for adaptation_set in period.findall(_add_ns('AdaptationSet')):
if is_drm_protected(adaptation_set):
continue
adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
for representation in adaptation_set.findall(_add_ns('Representation')):
if is_drm_protected(representation):
continue
representation_attrib = adaptation_set.attrib.copy()
representation_attrib.update(representation.attrib)
# According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
mime_type = representation_attrib['mimeType']
content_type = mime_type.split('/')[0]
if content_type == 'text':
# TODO implement WebVTT downloading
pass
elif content_type in ('video', 'audio'):
base_url = ''
for element in (representation, adaptation_set, period, mpd_doc):
base_url_e = element.find(_add_ns('BaseURL'))
if base_url_e is not None:
base_url = base_url_e.text + base_url
if re.match(r'^https?://', base_url):
break
if mpd_base_url and not re.match(r'^https?://', base_url):
if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
mpd_base_url += '/'
base_url = mpd_base_url + base_url
representation_id = representation_attrib.get('id')
lang = representation_attrib.get('lang')
url_el = representation.find(_add_ns('BaseURL'))
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
bandwidth = int_or_none(representation_attrib.get('bandwidth'))
f = {
'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
'url': base_url,
'manifest_url': mpd_url,
'ext': mimetype2ext(mime_type),
'width': int_or_none(representation_attrib.get('width')),
'height': int_or_none(representation_attrib.get('height')),
'tbr': float_or_none(bandwidth, 1000),
'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
'fps': int_or_none(representation_attrib.get('frameRate')),
'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
'format_note': 'DASH %s' % content_type,
'filesize': filesize,
}
f.update(parse_codecs(representation_attrib.get('codecs')))
representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
def prepare_template(template_name, identifiers):
t = representation_ms_info[template_name]
t = t.replace('$RepresentationID$', representation_id)
t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
t = t.replace('$$', '$')
return t
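# For illustration (hypothetical template strings): with representation_id
# 'v1', the template '$RepresentationID$/chunk_$Number%05d$.m4s' becomes
# 'v1/chunk_%(Number)05d.m4s', so interpolating {'Number': 7} yields
# 'v1/chunk_00007.m4s', while a plain '$Number$' would become '%(Number)d'.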
# @initialization is a regular template like @media one
# so it should be handled just the same way (see
# https://github.com/rg3/youtube-dl/issues/11605)
if 'initialization' in representation_ms_info:
initialization_template = prepare_template(
'initialization',
# As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
# $Time$ shall not be included for @initialization thus
# only $Bandwidth$ remains
('Bandwidth', ))
representation_ms_info['initialization_url'] = initialization_template % {
'Bandwidth': bandwidth,
}
if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:
media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
# As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
# can't be used at the same time
if '%(Number' in media_template and 's' not in representation_ms_info:
segment_duration = None
if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
representation_ms_info['fragments'] = [{
'url': media_template % {
'Number': segment_number,
'Bandwidth': bandwidth,
},
'duration': segment_duration,
} for segment_number in range(
representation_ms_info['start_number'],
representation_ms_info['total_number'] + representation_ms_info['start_number'])]
else:
# $Number*$ or $Time$ in media template with S list available
# Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
# Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
representation_ms_info['fragments'] = []
segment_time = 0
segment_d = None
segment_number = representation_ms_info['start_number']
def add_segment_url():
segment_url = media_template % {
'Time': segment_time,
'Bandwidth': bandwidth,
'Number': segment_number,
}
representation_ms_info['fragments'].append({
'url': segment_url,
'duration': float_or_none(segment_d, representation_ms_info['timescale']),
})
for num, s in enumerate(representation_ms_info['s']):
segment_time = s.get('t') or segment_time
segment_d = s['d']
add_segment_url()
segment_number += 1
for r in range(s.get('r', 0)):
segment_time += segment_d
add_segment_url()
segment_number += 1
segment_time += segment_d
elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
# No media template
# Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
# or any YouTube dashsegments video
fragments = []
segment_index = 0
timescale = representation_ms_info['timescale']
for s in representation_ms_info['s']:
duration = float_or_none(s['d'], timescale)
for r in range(s.get('r', 0) + 1):
fragments.append({
'url': representation_ms_info['segment_urls'][segment_index],
'duration': duration,
})
segment_index += 1
representation_ms_info['fragments'] = fragments
# NB: MPD manifest may contain direct URLs to unfragmented media.
# No fragments key is present in this case.
if 'fragments' in representation_ms_info:
f.update({
'fragments': [],
'protocol': 'http_dash_segments',
})
if 'initialization_url' in representation_ms_info:
initialization_url = representation_ms_info['initialization_url']
if not f.get('url'):
f['url'] = initialization_url
f['fragments'].append({'url': initialization_url})
f['fragments'].extend(representation_ms_info['fragments'])
for fragment in f['fragments']:
fragment['url'] = urljoin(base_url, fragment['url'])
try:
existing_format = next(
fo for fo in formats
if fo['format_id'] == representation_id)
except StopIteration:
full_info = formats_dict.get(representation_id, {}).copy()
full_info.update(f)
formats.append(full_info)
else:
existing_format.update(f)
else:
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
return formats
def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True):
res = self._download_webpage_handle(
ism_url, video_id,
note=note or 'Downloading ISM manifest',
errnote=errnote or 'Failed to download ISM manifest',
fatal=fatal)
if res is False:
return []
ism, urlh = res
return self._parse_ism_formats(
compat_etree_fromstring(ism.encode('utf-8')), urlh.geturl(), ism_id)
def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
"""
Parse formats from ISM manifest.
References:
1. [MS-SSTR]: Smooth Streaming Protocol,
https://msdn.microsoft.com/en-us/library/ff469518.aspx
"""
if ism_doc.get('IsLive') == 'TRUE' or ism_doc.find('Protection') is not None:
return []
duration = int(ism_doc.attrib['Duration'])
timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000
formats = []
for stream in ism_doc.findall('StreamIndex'):
stream_type = stream.get('Type')
if stream_type not in ('video', 'audio'):
continue
url_pattern = stream.attrib['Url']
stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
stream_name = stream.get('Name')
for track in stream.findall('QualityLevel'):
fourcc = track.get('FourCC')
# TODO: add support for WVC1 and WMAP
if fourcc not in ('H264', 'AVC1', 'AACL'):
self.report_warning('%s is not a supported codec' % fourcc)
continue
tbr = int(track.attrib['Bitrate']) // 1000
# [1] does not mention Width and Height attributes. However,
# they're often present while MaxWidth and MaxHeight are
# missing, so should be used as fallbacks
width = int_or_none(track.get('MaxWidth') or track.get('Width'))
height = int_or_none(track.get('MaxHeight') or track.get('Height'))
sampling_rate = int_or_none(track.get('SamplingRate'))
track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)
fragments = []
fragment_ctx = {
'time': 0,
}
stream_fragments = stream.findall('c')
for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
if not fragment_ctx['duration']:
try:
next_fragment_time = int(stream_fragments[stream_fragment_index + 1].attrib['t'])
except IndexError:
next_fragment_time = duration
fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
for _ in range(fragment_repeat):
fragments.append({
'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
'duration': fragment_ctx['duration'] / stream_timescale,
})
fragment_ctx['time'] += fragment_ctx['duration']
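# Worked illustration (hypothetical StreamIndex with TimeScale 10000000):
# <c t="0" d="20000000" r="2"/> has fragment_repeat 2, so the loop above
# emits two fragments of 20000000 / 10000000 = 2.0 seconds each, advancing
# fragment_ctx['time'] to 20000000 and then 40000000.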
format_id = []
if ism_id:
format_id.append(ism_id)
if stream_name:
format_id.append(stream_name)
format_id.append(compat_str(tbr))
formats.append({
'format_id': '-'.join(format_id),
'url': ism_url,
'manifest_url': ism_url,
'ext': 'ismv' if stream_type == 'video' else 'isma',
'width': width,
'height': height,
'tbr': tbr,
'asr': sampling_rate,
'vcodec': 'none' if stream_type == 'audio' else fourcc,
'acodec': 'none' if stream_type == 'video' else fourcc,
'protocol': 'ism',
'fragments': fragments,
'_download_params': {
'duration': duration,
'timescale': stream_timescale,
'width': width or 0,
'height': height or 0,
'fourcc': fourcc,
'codec_private_data': track.get('CodecPrivateData'),
'sampling_rate': sampling_rate,
'channels': int_or_none(track.get('Channels', 2)),
'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
},
})
return formats
def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None):
def absolute_url(video_url):
return compat_urlparse.urljoin(base_url, video_url)
def parse_content_type(content_type):
if not content_type:
return {}
ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
if ctr:
mimetype, codecs = ctr.groups()
f = parse_codecs(codecs)
f['ext'] = mimetype2ext(mimetype)
return f
return {}
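# A worked illustration (hypothetical header value):
#   parse_content_type('video/mp4; codecs="avc1.64001f, mp4a.40.2"')
# captures mimetype 'video/mp4' and codecs 'avc1.64001f, mp4a.40.2', and
# returns the parse_codecs() result merged with {'ext': 'mp4'}.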
def _media_formats(src, cur_media_type):
full_url = absolute_url(src)
ext = determine_ext(full_url)
if ext == 'm3u8':
is_plain_url = False
formats = self._extract_m3u8_formats(
full_url, video_id, ext='mp4',
entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
preference=preference)
elif ext == 'mpd':
is_plain_url = False
formats = self._extract_mpd_formats(
full_url, video_id, mpd_id=mpd_id)
else:
is_plain_url = True
formats = [{
'url': full_url,
'vcodec': 'none' if cur_media_type == 'audio' else None,
}]
return is_plain_url, formats
entries = []
media_tags = [(media_tag, media_type, '')
for media_tag, media_type
in re.findall(r'(?s)(<(video|audio)[^>]*/>)', webpage)]
media_tags.extend(re.findall(
# We only allow video|audio followed by a whitespace or '>'.
# Allowing more characters may end up in significant slow down (see
# https://github.com/rg3/youtube-dl/issues/11979, example URL:
# http://www.porntrex.com/maps/videositemap.xml).
r'(?s)(<(?P<tag>video|audio)(?:\s+[^>]*)?>)(.*?)</(?P=tag)>', webpage))
for media_tag, media_type, media_content in media_tags:
media_info = {
'formats': [],
'subtitles': {},
}
media_attributes = extract_attributes(media_tag)
src = media_attributes.get('src')
if src:
_, formats = _media_formats(src, media_type)
media_info['formats'].extend(formats)
media_info['thumbnail'] = media_attributes.get('poster')
if media_content:
for source_tag in re.findall(r'<source[^>]+>', media_content):
source_attributes = extract_attributes(source_tag)
src = source_attributes.get('src')
if not src:
continue
is_plain_url, formats = _media_formats(src, media_type)
if is_plain_url:
f = parse_content_type(source_attributes.get('type'))
f.update(formats[0])
media_info['formats'].append(f)
else:
media_info['formats'].extend(formats)
for track_tag in re.findall(r'<track[^>]+>', media_content):
track_attributes = extract_attributes(track_tag)
kind = track_attributes.get('kind')
if not kind or kind in ('subtitles', 'captions'):
src = track_attributes.get('src')
if not src:
continue
lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
media_info['subtitles'].setdefault(lang, []).append({
'url': absolute_url(src),
})
if media_info['formats'] or media_info['subtitles']:
entries.append(media_info)
return entries
def _extract_akamai_formats(self, manifest_url, video_id, hosts={}):
formats = []
hdcore_sign = 'hdcore=3.7.0'
f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
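# e.g. (hypothetical URL) 'https://ex.akamaihd.net/i/foo/master.m3u8'
# is rewritten to 'https://ex.akamaihd.net/z/foo/manifest.f4m'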
hds_host = hosts.get('hds')
if hds_host:
f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
if 'hdcore=' not in f4m_url:
f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
f4m_formats = self._extract_f4m_formats(
f4m_url, video_id, f4m_id='hds', fatal=False)
for entry in f4m_formats:
entry.update({'extra_param_to_segment_url': hdcore_sign})
formats.extend(f4m_formats)
m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
hls_host = hosts.get('hls')
if hls_host:
m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
formats.extend(self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
return formats
def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
url_base = self._search_regex(r'(?:https?|rtmp|rtsp)(://[^?]+)', url, 'format url')
http_base_url = 'http' + url_base
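# e.g. (hypothetical URL) 'rtmp://ex.com/vod/mp4:clip.mp4/playlist.m3u8'
# loses its manifest suffix above, url_base is '://ex.com/vod/mp4:clip.mp4'
# and http_base_url becomes 'http://ex.com/vod/mp4:clip.mp4'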
formats = []
if 'm3u8' not in skip_protocols:
formats.extend(self._extract_m3u8_formats(
http_base_url + '/playlist.m3u8', video_id, 'mp4',
m3u8_entry_protocol, m3u8_id='hls', fatal=False))
if 'f4m' not in skip_protocols:
formats.extend(self._extract_f4m_formats(
http_base_url + '/manifest.f4m',
video_id, f4m_id='hds', fatal=False))
if 'dash' not in skip_protocols:
formats.extend(self._extract_mpd_formats(
http_base_url + '/manifest.mpd',
video_id, mpd_id='dash', fatal=False))
if re.search(r'(?:/smil:|\.smil)', url_base):
if 'smil' not in skip_protocols:
rtmp_formats = self._extract_smil_formats(
http_base_url + '/jwplayer.smil',
video_id, fatal=False)
for rtmp_format in rtmp_formats:
rtsp_format = rtmp_format.copy()
rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
del rtsp_format['play_path']
del rtsp_format['ext']
rtsp_format.update({
'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
'protocol': 'rtsp',
})
formats.extend([rtmp_format, rtsp_format])
else:
for protocol in ('rtmp', 'rtsp'):
if protocol not in skip_protocols:
formats.append({
'url': protocol + url_base,
'format_id': protocol,
'protocol': protocol,
})
return formats
def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json):
mobj = re.search(
r'(?s)jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)(?!</script>).*?\.setup\s*\((?P<options>[^)]+)\)',
webpage)
if mobj:
try:
jwplayer_data = self._parse_json(mobj.group('options'),
video_id=video_id,
transform_source=transform_source)
except ExtractorError:
pass
else:
if isinstance(jwplayer_data, dict):
return jwplayer_data
def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
jwplayer_data = self._find_jwplayer_data(
webpage, video_id, transform_source=js_to_json)
return self._parse_jwplayer_data(
jwplayer_data, video_id, *args, **kwargs)
def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
# JWPlayer backward compatibility: flattened playlists
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
if 'playlist' not in jwplayer_data:
jwplayer_data = {'playlist': [jwplayer_data]}
entries = []
# JWPlayer backward compatibility: single playlist item
# https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
if not isinstance(jwplayer_data['playlist'], list):
jwplayer_data['playlist'] = [jwplayer_data['playlist']]
for video_data in jwplayer_data['playlist']:
# JWPlayer backward compatibility: flattened sources
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
if 'sources' not in video_data:
video_data['sources'] = [video_data]
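# e.g. a flat config {'file': 'v.mp4', 'title': 't'} is wrapped into a
# single-item playlist above, and the item then becomes its own (only)
# entry in 'sources'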
this_video_id = video_id or video_data['mediaid']
formats = self._parse_jwplayer_formats(
video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)
self._sort_formats(formats)
subtitles = {}
tracks = video_data.get('tracks')
if tracks and isinstance(tracks, list):
for track in tracks:
if track.get('kind') != 'captions':
continue
track_url = urljoin(base_url, track.get('file'))
if not track_url:
continue
subtitles.setdefault(track.get('label') or 'en', []).append({
'url': self._proto_relative_url(track_url)
})
entries.append({
'id': this_video_id,
'title': video_data['title'] if require_title else video_data.get('title'),
'description': video_data.get('description'),
'thumbnail': self._proto_relative_url(video_data.get('image')),
'timestamp': int_or_none(video_data.get('pubdate')),
'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
'subtitles': subtitles,
'formats': formats,
})
if len(entries) == 1:
return entries[0]
else:
return self.playlist_result(entries)
def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
urls = []
formats = []
for source in jwplayer_sources_data:
source_url = self._proto_relative_url(source.get('file'))
if not source_url:
continue
if base_url:
source_url = compat_urlparse.urljoin(base_url, source_url)
if source_url in urls:
continue
urls.append(source_url)
source_type = source.get('type') or ''
ext = mimetype2ext(source_type) or determine_ext(source_url)
if source_type == 'hls' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
source_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id=m3u8_id, fatal=False))
elif ext == 'mpd':
formats.extend(self._extract_mpd_formats(
source_url, video_id, mpd_id=mpd_id, fatal=False))
elif ext == 'smil':
formats.extend(self._extract_smil_formats(
source_url, video_id, fatal=False))
# https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
elif source_type.startswith('audio') or ext in (
'oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
formats.append({
'url': source_url,
'vcodec': 'none',
'ext': ext,
})
else:
height = int_or_none(source.get('height'))
if height is None:
# Often no height is provided but there is a label in
# format like "1080p", "720p SD", or 1080.
height = int_or_none(self._search_regex(
r'^(\d{3,4})[pP]?(?:\b|$)', compat_str(source.get('label') or ''),
'height', default=None))
a_format = {
'url': source_url,
'width': int_or_none(source.get('width')),
'height': height,
'tbr': int_or_none(source.get('bitrate')),
'ext': ext,
}
if source_url.startswith('rtmp'):
a_format['ext'] = 'flv'
# See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
# of jwplayer.flash.swf
rtmp_url_parts = re.split(
r'((?:mp4|mp3|flv):)', source_url, 1)
if len(rtmp_url_parts) == 3:
rtmp_url, prefix, play_path = rtmp_url_parts
a_format.update({
'url': rtmp_url,
'play_path': prefix + play_path,
})
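# e.g. (hypothetical URL) 'rtmp://ex.com/app/mp4:clips/v.mp4' splits into
# url 'rtmp://ex.com/app/' and play_path 'mp4:clips/v.mp4'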
if rtmp_params:
a_format.update(rtmp_params)
formats.append(a_format)
return formats
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None):
cookie = compat_cookiejar.Cookie(
0, name, value, None, None, domain, None,
None, '/', True, False, expire_time, '', None, None, None)
self._downloader.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
""" Return a compat_cookies.SimpleCookie with the cookies for the url """
req = sanitized_Request(url)
self._downloader.cookiejar.add_cookie_header(req)
return compat_cookies.SimpleCookie(req.get_header('Cookie'))
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
assert not hasattr(self, '_TESTS'), \
'%s has _TEST and _TESTS' % type(self).__name__
tests = [t]
else:
tests = getattr(self, '_TESTS', [])
for t in tests:
if not include_onlymatching and t.get('only_matching', False):
continue
t['name'] = type(self).__name__[:-len('IE')]
yield t
def is_suitable(self, age_limit):
""" Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are) """
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if tc.get('playlist', []):
tc = tc['playlist'][0]
is_restricted = age_restricted(
tc.get('info_dict', {}).get('age_limit'), age_limit)
if not is_restricted:
return True
any_restricted = any_restricted or is_restricted
return not any_restricted
def extract_subtitles(self, *args, **kwargs):
if (self._downloader.params.get('writesubtitles', False) or
self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
""" Merge subtitle items for one language. Items with duplicated URLs
will be dropped. """
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
return ret
@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
""" Merge two subtitle dictionaries, language by language. """
ret = dict(subtitle_dict1)
for lang in subtitle_dict2:
ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
return ret
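# e.g. merging {'en': [a]} with {'en': [a, b], 'fr': [c]} (where a, b and
# c are subtitle item dicts compared by their 'url') gives
# {'en': [a, b], 'fr': [c]}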
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False) or
self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def mark_watched(self, *args, **kwargs):
if (self._downloader.params.get('mark_watched', False) and
(self._get_login_info()[0] is not None or
self._downloader.params.get('cookiefile') is not None)):
self._mark_watched(*args, **kwargs)
def _mark_watched(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def geo_verification_headers(self):
headers = {}
geo_verification_proxy = self._downloader.params.get('geo_verification_proxy')
if geo_verification_proxy:
headers['Ytdl-request-proxy'] = geo_verification_proxy
return headers
def _generic_id(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
def _generic_title(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
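# e.g. (hypothetical URL) both helpers map
# 'http://ex.com/media/My%20Clip.mp4' to 'My Clip'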
class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search queries extractors.
They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""
@classmethod
def _make_valid_url(cls):
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
@classmethod
def suitable(cls, url):
return re.match(cls._make_valid_url(), url) is not None
def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
raise ExtractorError('Invalid search query "%s"' % query)
prefix = mobj.group('prefix')
query = mobj.group('query')
if prefix == '':
return self._get_n_results(query, 1)
elif prefix == 'all':
return self._get_n_results(query, self._MAX_RESULTS)
else:
n = int(prefix)
if n <= 0:
raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
raise NotImplementedError('This method must be implemented by subclasses')
@property
def SEARCH_KEY(self):
return self._SEARCH_KEY
|
DivineHime/seishirou
|
lib/youtube_dl/extractor/common.py
|
Python
|
gpl-3.0
| 119,472
|
[
"VisIt"
] |
3d60d1a23bfd6173cb438cccca6b3826cd7f8f712d351798c49121059eaaba95
|
import os
import sys
import re
import glob
import string
import traceback
import time
# list of tests to run ...
# prefixes
# C = command line scripts
# G = graphics and internal GUI
standard_prefixes = ['C', 'G' ]
# uniform parameter handling
argv = sys.argv
if len(argv):
if re.search(r"\.py$", argv[0]):
argv = argv[1:]
# ---
pymol = "pymol"
cmmd = r"c:\pymolws\pymol.exe "
cmp = "cmp"
ref = "ref"
inp = "inp"
tmp = "tmp"
if not os.path.exists(cmp):
os.mkdir(cmp)
if not os.path.exists(tmp):
os.mkdir(tmp)
if len(argv)>1:
tests = argv
else:
tests = standard_prefixes
for test in tests:
flist = glob.glob( inp+"/"+test+"*" )
cvs = inp+"/CVS"
if cvs in flist:
flist.remove(cvs)
flist.sort()
for ifil in flist:
# get options
f = open(ifil)
opt = string.strip(f.readline())
opt = re.sub(r"^\s*\#", "", opt)
f.close()
tst = re.sub(r".*/|.*\\","",ifil) # get exact test name without suffix
tst = re.sub(r"\..*","",tst)
print " run_tests: "+tst+"..."
syscmd = cmmd+" -x -d pwd "+opt+" "+ifil+" > tmp.txt 2>&1"
print syscmd
#os.system("c:\\pymolws\\pymol.exe")
os.system(syscmd)
# generate log file
f = open("tmp.txt")
g = open("cmp\\"+tst+".log","w")
echo = 0
while 1:
l = f.readline()
if not l: break
ll=string.strip(l)
if ll=='BEGIN-LOG':
echo = 1
elif ll=='END-LOG':
echo = 0
elif echo:
g.write(l)
f.close()
g.close()
# now compare
f = open("cmp\\"+tst+".log","r")
g = open("ref\\"+tst+".log","r")
while 1:
lf = f.readline()
lg = g.readline()
if (not lf) and not (lg):
break
if string.strip(lf)!=string.strip(lg):
print "<",lf
print ">",lg
print "done"
time.sleep(360)
#
|
gratefulfrog/lib
|
python/pymol/pymol_path/test/win.py
|
Python
|
gpl-2.0
| 1,943
|
[
"PyMOL"
] |
a94d09b43cab4937fb69ba4d64d7c301753d1350d9d6e1e72156f53bcfcab69a
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send VLAN commands to Lenovo Switches
# Overloading aspect of vlan creation in a range is pending
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_vlan
author: "Dave Kasberg (@dkasberg)"
short_description: Manage VLAN resources and attributes on devices running Lenovo CNOS
description:
- This module allows you to work with VLAN related configurations. The
operators used are overloaded to ensure control over switch VLAN
configurations. The first level of VLAN configuration allows you to set up
the VLAN range, the VLAN tag persistence, a VLAN access map and access map
filter. After passing this level, there are five VLAN arguments that will
perform further configurations. They are vlanArg1, vlanArg2, vlanArg3,
vlanArg4, and vlanArg5. The value of vlanArg1 will determine the way the
following arguments will be evaluated. For more details on how to use these
arguments, see [Overloaded Variables].
This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
that must be created by the user in their local directory, where the playbook is run.
For more information about this module from Lenovo and customizing its usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_vlan.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
vlanArg1:
description:
- This is an overloaded vlan first argument. Usage of this argument can be found in the User Guide referenced above.
required: true
default: null
choices: [access-map, dot1q, filter, <1-3999> VLAN ID 1-3999 or range]
vlanArg2:
description:
- This is an overloaded vlan second argument. Usage of this argument can be found in the User Guide referenced above.
required: false
default: null
choices: [VLAN Access Map name, egress-only, name, flood, state, ip]
vlanArg3:
description:
- This is an overloaded vlan third argument. Usage of this argument can be found in the User Guide referenced above.
required: false
default: null
choices: [action, match, statistics, enter VLAN id or range of vlan, ascii name for the VLAN, ipv4 or ipv6, active or suspend, fast-leave,
last-member-query-interval, mrouter, querier, querier-timeout, query-interval, query-max-response-time, report-suppression,
robustness-variable, startup-query-count, startup-query-interval, static-group]
vlanArg4:
description:
- This is an overloaded vlan fourth argument. Usage of this argument can be found in the User Guide referenced above.
required: false
default: null
choices: [drop or forward or redirect, ip or mac, Interval in seconds, ethernet, port-aggregation, Querier IP address,
Querier Timeout in seconds, Query Interval in seconds, Query Max Response Time in seconds, Robustness Variable value,
Number of queries sent at startup, Query Interval at startup]
vlanArg5:
description:
- This is an overloaded vlan fifth argument. Usage of this argument can be found in the User Guide referenced above.
required: false
default: null
choices: [access-list name, Slot/chassis number, Port Aggregation Number]
'''
EXAMPLES = '''
Tasks: The following are examples of using the module cnos_vlan. These are written in the main.yml file of the tasks directory.
---
- name: Test Vlan - Create a vlan, name it
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: 13
vlanArg2: "name"
vlanArg3: "Anil"
- name: Test Vlan - Create a vlan, Flood configuration
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: 13
vlanArg2: "flood"
vlanArg3: "ipv4"
- name: Test Vlan - Create a vlan, State configuration
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: 13
vlanArg2: "state"
vlanArg3: "active"
- name: Test Vlan - VLAN Access map1
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: "access-map"
vlanArg2: "Anil"
vlanArg3: "statistics"
- name: Test Vlan - VLAN Access Map2
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: "access-map"
vlanArg2: "Anil"
vlanArg3: "action"
vlanArg4: "forward"
- name: Test Vlan - ip igmp snooping query interval
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: 13
vlanArg2: "ip"
vlanArg3: "query-interval"
vlanArg4: 1313
- name: Test Vlan - ip igmp snooping mrouter interface port-aggregation 23
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: 13
vlanArg2: "ip"
vlanArg3: "mrouter"
vlanArg4: "port-aggregation"
vlanArg5: 23
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "VLAN configuration is accomplished"
'''
import sys
try:
import paramiko
HAS_PARAMIKO = True
except ImportError:
HAS_PARAMIKO = False
import time
import socket
import array
import json
import re
try:
from ansible.module_utils.network.cnos import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
#
# Define parameters for vlan creation entry
#
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),
vlanArg1=dict(required=True),
vlanArg2=dict(required=False),
vlanArg3=dict(required=False),
vlanArg4=dict(required=False),
vlanArg5=dict(required=False),),
supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
vlanArg1 = module.params['vlanArg1']
vlanArg2 = module.params['vlanArg2']
vlanArg3 = module.params['vlanArg3']
vlanArg4 = module.params['vlanArg4']
vlanArg5 = module.params['vlanArg5']
outputfile = module.params['outputfile']
hostIP = module.params['host']
deviceType = module.params['deviceType']
output = ""
if not HAS_PARAMIKO:
module.fail_json(msg='paramiko is required for this module')
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
# Automatically add untrusted hosts (make sure okay for security policy in
# your environment)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(hostIP, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
# Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + \
cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + \
cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
# Go to config mode
output = output + \
cnos.waitForDeviceResponse("conf d\n", "(config)#", 2, remote_conn)
# Send the CLI command
output = output + \
cnos.vlanConfig(
remote_conn, deviceType, "(config)#", 2, vlanArg1, vlanArg2,
vlanArg3, vlanArg4, vlanArg5)
# Save it into the file
file = open(outputfile, "a")
file.write(output)
file.close()
# need to add logic to check when changes occur or not
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="VLAN configuration is accomplished")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
|
kbrebanov/ansible
|
lib/ansible/modules/network/cnos/cnos_vlan.py
|
Python
|
gpl-3.0
| 11,632
|
[
"VisIt"
] |
2aaec32097dee7f11dcb83b49095f3a416215e24d941bc3a64c7ea84fdb5ac3c
|
#!/usr/bin/env python
'''
IP/EA-ADC calculations for closed-shell N2
'''
from pyscf import gto, scf, adc
mol = gto.Mole()
r = 1.098
mol.atom = [
['N', ( 0., 0. , -r/2 )],
['N', ( 0., 0. , r/2)],]
mol.basis = {'N':'aug-cc-pvdz'}
mol.build()
mf = scf.RHF(mol)
mf.conv_tol = 1e-12
mf.kernel()
myadc = adc.ADC(mf)
myadc.kernel_gs()
#IP-RADC(2) for 3 roots
myadc.verbose = 4
myadcip = adc.radc.RADCIP(myadc)
eip,vip,pip,xip = myadcip.kernel(nroots=3)
#EA-RADC(3) for 3 roots
myadc.method = "adc(3)"
myadc.kernel_gs()
myadcea = adc.radc.RADCEA(myadc)
eea,vea,pea,xea = myadcea.kernel(nroots=3)
#Analyze eigenvectors only
myadcea.compute_properties = False
myadcea.analyze()
#IP/EA-RADC(3) for 1 root
eip,vip,pip,xip,adc_es = myadc.ip_adc()
eea,vea,pea,xea,adc_es = myadc.ea_adc()
|
sunqm/pyscf
|
examples/adc/03-closed_shell_different_setup.py
|
Python
|
apache-2.0
| 801
|
[
"PySCF"
] |
ba561c450d82a0832ba4b508ac9fabf85755393f0536eca8f33c7fb271f0d99c
|
#!/usr/bin/env python
##############################################################################################
#
#
# regrid_emissions_N96e.py
#
#
# Requirements:
# Iris 1.10, time, cf_units, numpy
#
#
# This Python script has been written by N.L. Abraham as part of the UKCA Tutorials:
# http://www.ukca.ac.uk/wiki/index.php/UKCA_Chemistry_and_Aerosol_Tutorials_at_vn10.4
#
# Copyright (C) 2015 University of Cambridge
#
# This is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# It is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You find a copy of the GNU Lesser General Public License at <http://www.gnu.org/licenses/>.
#
# Written by N. Luke Abraham 2016-10-20 <nla27@cam.ac.uk>
# Modified by Marcus Koehler 2017-10-11 <mok21@cam.ac.uk>
#
#
##############################################################################################
# preamble
import time
import iris
import cf_units
import numpy
# --- CHANGE THINGS BELOW THIS LINE TO WORK WITH YOUR FILES ETC. ---
# name of file containing an ENDGame grid, e.g. your model output
# NOTE: all the fields in the file should be on the same horizontal
# grid, as the field used MAY NOT be the first in order of STASH
grid_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/um/archer/ag542/apm.pp/ag542a.pm1988dec'
#
# name of emissions file
# NOTE: We use the fluxes from the Gregorian calendar file also for the 360_day emission files
emissions_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/emissions/combined_1950-2020/0.5x0.5/combined_sources_C3H8_lumped_1950-2020.nc'
#
# STASH code emissions are associated with
# 301-320: surface
# m01s00i306: C3H8 surface emissions
#
# 321-340: full atmosphere
#
stash='m01s00i306'
# --- BELOW THIS LINE, NOTHING SHOULD NEED TO BE CHANGED ---
species_name='C3H8'
# this is the grid we want to regrid to, e.g. N96 ENDGame
grd=iris.load(grid_file)[0]
grd.coord(axis='x').guess_bounds()
grd.coord(axis='y').guess_bounds()
# This is the original data
ems=iris.load_cube(emissions_file)
# make intersection between 0 and 360 longitude to ensure that
# the data is regridded correctly
nems = ems.intersection(longitude=(0, 360))
# make sure that we use the same coordinate system, otherwise regrid won't work
nems.coord(axis='x').coord_system=grd.coord_system()
nems.coord(axis='y').coord_system=grd.coord_system()
# now guess the bounds of the new grid prior to regridding
nems.coord(axis='x').guess_bounds()
nems.coord(axis='y').guess_bounds()
# now regrid
ocube=nems.regrid(grd,iris.analysis.AreaWeighted())
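# optional, illustrative sanity check: the cube should now be on the
# N96e grid, e.g.
#   print(ocube.summary(shorten=True))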
# now add correct attributes and names to netCDF file
ocube.var_name='emissions_'+str.strip(species_name)
ocube.long_name=str.strip(species_name)+' surf emissions'
ocube.standard_name='tendency_of_atmosphere_mass_content_of_propane_due_to_emission'
ocube.units=cf_units.Unit('kg m-2 s-1')
ocube.attributes['vertical_scaling']='surface'
ocube.attributes['um_stash_source']=stash
ocube.attributes['tracer_name']=str.strip(species_name)
ocube.attributes['lumped_species']='C3H8 and C3H6' # lumping of species
# global attributes, so don't set in local_keys
# NOTE: all these should be strings, including the numbers!
# basic emissions type
ocube.attributes['emission_type']='1' # time series
ocube.attributes['update_type']='1' # same as above
ocube.attributes['update_freq_in_hours']='120' # i.e. 5 days
ocube.attributes['um_version']='10.6' # UM version
ocube.attributes['source']='combined_sources_C3H8_lumped_1950-2020.nc'
ocube.attributes['title']='Time-varying monthly surface emissions of propane, lumped with propene, from 1950 to 2020'
ocube.attributes['File_version']='v3'
ocube.attributes['File_creation_date']=time.ctime(time.time())
ocube.attributes['grid']='regular 1.875 x 1.25 degree longitude-latitude grid (N96e)'
ocube.attributes['history']=time.ctime(time.time())+': '+__file__+' \n'+ocube.attributes['history']
ocube.attributes['institution']='Centre for Atmospheric Science, Department of Chemistry, University of Cambridge, U.K.'
ocube.attributes['reference']='Granier et al., Clim. Change, 2011; Lamarque et al., Atmos. Chem. Phys., 2010'
del ocube.attributes['file_creation_date']
del ocube.attributes['description']
# rename and set time coord - mid-month from 1950-Jan to 2020-Dec
# this bit is annoyingly fiddly
ocube.coord(axis='t').var_name='time'
ocube.coord(axis='t').standard_name='time'
ocube.coords(axis='t')[0].units=cf_units.Unit('days since 1950-01-01 00:00:00', calendar='360_day')
ocube.coord(axis='t').points=numpy.array([
15, 45, 75, 105, 135, 165, 195, 225, 255, 285, 315, 345, 375, 405,
435, 465, 495, 525, 555, 585, 615, 645, 675, 705, 735, 765, 795, 825,
855, 885, 915, 945, 975, 1005, 1035, 1065, 1095, 1125, 1155, 1185, 1215,
1245, 1275, 1305, 1335, 1365, 1395, 1425, 1455, 1485, 1515, 1545, 1575,
1605, 1635, 1665, 1695, 1725, 1755, 1785, 1815, 1845, 1875, 1905, 1935,
1965, 1995, 2025, 2055, 2085, 2115, 2145, 2175, 2205, 2235, 2265, 2295,
2325, 2355, 2385, 2415, 2445, 2475, 2505, 2535, 2565, 2595, 2625, 2655,
2685, 2715, 2745, 2775, 2805, 2835, 2865, 2895, 2925, 2955, 2985, 3015,
3045, 3075, 3105, 3135, 3165, 3195, 3225, 3255, 3285, 3315, 3345, 3375,
3405, 3435, 3465, 3495, 3525, 3555, 3585, 3615, 3645, 3675, 3705, 3735,
3765, 3795, 3825, 3855, 3885, 3915, 3945, 3975, 4005, 4035, 4065, 4095,
4125, 4155, 4185, 4215, 4245, 4275, 4305, 4335, 4365, 4395, 4425, 4455,
4485, 4515, 4545, 4575, 4605, 4635, 4665, 4695, 4725, 4755, 4785, 4815,
4845, 4875, 4905, 4935, 4965, 4995, 5025, 5055, 5085, 5115, 5145, 5175,
5205, 5235, 5265, 5295, 5325, 5355, 5385, 5415, 5445, 5475, 5505, 5535,
5565, 5595, 5625, 5655, 5685, 5715, 5745, 5775, 5805, 5835, 5865, 5895,
5925, 5955, 5985, 6015, 6045, 6075, 6105, 6135, 6165, 6195, 6225, 6255,
6285, 6315, 6345, 6375, 6405, 6435, 6465, 6495, 6525, 6555, 6585, 6615,
6645, 6675, 6705, 6735, 6765, 6795, 6825, 6855, 6885, 6915, 6945, 6975,
7005, 7035, 7065, 7095, 7125, 7155, 7185, 7215, 7245, 7275, 7305, 7335,
7365, 7395, 7425, 7455, 7485, 7515, 7545, 7575, 7605, 7635, 7665, 7695,
7725, 7755, 7785, 7815, 7845, 7875, 7905, 7935, 7965, 7995, 8025, 8055,
8085, 8115, 8145, 8175, 8205, 8235, 8265, 8295, 8325, 8355, 8385, 8415,
8445, 8475, 8505, 8535, 8565, 8595, 8625, 8655, 8685, 8715, 8745, 8775,
8805, 8835, 8865, 8895, 8925, 8955, 8985, 9015, 9045, 9075, 9105, 9135,
9165, 9195, 9225, 9255, 9285, 9315, 9345, 9375, 9405, 9435, 9465, 9495,
9525, 9555, 9585, 9615, 9645, 9675, 9705, 9735, 9765, 9795, 9825, 9855,
9885, 9915, 9945, 9975, 10005, 10035, 10065, 10095, 10125, 10155, 10185,
10215, 10245, 10275, 10305, 10335, 10365, 10395, 10425, 10455, 10485,
10515, 10545, 10575, 10605, 10635, 10665, 10695, 10725, 10755, 10785,
10815, 10845, 10875, 10905, 10935, 10965, 10995, 11025, 11055, 11085,
11115, 11145, 11175, 11205, 11235, 11265, 11295, 11325, 11355, 11385,
11415, 11445, 11475, 11505, 11535, 11565, 11595, 11625, 11655, 11685,
11715, 11745, 11775, 11805, 11835, 11865, 11895, 11925, 11955, 11985,
12015, 12045, 12075, 12105, 12135, 12165, 12195, 12225, 12255, 12285,
12315, 12345, 12375, 12405, 12435, 12465, 12495, 12525, 12555, 12585,
12615, 12645, 12675, 12705, 12735, 12765, 12795, 12825, 12855, 12885,
12915, 12945, 12975, 13005, 13035, 13065, 13095, 13125, 13155, 13185,
13215, 13245, 13275, 13305, 13335, 13365, 13395, 13425, 13455, 13485,
13515, 13545, 13575, 13605, 13635, 13665, 13695, 13725, 13755, 13785,
13815, 13845, 13875, 13905, 13935, 13965, 13995, 14025, 14055, 14085,
14115, 14145, 14175, 14205, 14235, 14265, 14295, 14325, 14355, 14385,
14415, 14445, 14475, 14505, 14535, 14565, 14595, 14625, 14655, 14685,
14715, 14745, 14775, 14805, 14835, 14865, 14895, 14925, 14955, 14985,
15015, 15045, 15075, 15105, 15135, 15165, 15195, 15225, 15255, 15285,
15315, 15345, 15375, 15405, 15435, 15465, 15495, 15525, 15555, 15585,
15615, 15645, 15675, 15705, 15735, 15765, 15795, 15825, 15855, 15885,
15915, 15945, 15975, 16005, 16035, 16065, 16095, 16125, 16155, 16185,
16215, 16245, 16275, 16305, 16335, 16365, 16395, 16425, 16455, 16485,
16515, 16545, 16575, 16605, 16635, 16665, 16695, 16725, 16755, 16785,
16815, 16845, 16875, 16905, 16935, 16965, 16995, 17025, 17055, 17085,
17115, 17145, 17175, 17205, 17235, 17265, 17295, 17325, 17355, 17385,
17415, 17445, 17475, 17505, 17535, 17565, 17595, 17625, 17655, 17685,
17715, 17745, 17775, 17805, 17835, 17865, 17895, 17925, 17955, 17985,
18015, 18045, 18075, 18105, 18135, 18165, 18195, 18225, 18255, 18285,
18315, 18345, 18375, 18405, 18435, 18465, 18495, 18525, 18555, 18585,
18615, 18645, 18675, 18705, 18735, 18765, 18795, 18825, 18855, 18885,
18915, 18945, 18975, 19005, 19035, 19065, 19095, 19125, 19155, 19185,
19215, 19245, 19275, 19305, 19335, 19365, 19395, 19425, 19455, 19485,
19515, 19545, 19575, 19605, 19635, 19665, 19695, 19725, 19755, 19785,
19815, 19845, 19875, 19905, 19935, 19965, 19995, 20025, 20055, 20085,
20115, 20145, 20175, 20205, 20235, 20265, 20295, 20325, 20355, 20385,
20415, 20445, 20475, 20505, 20535, 20565, 20595, 20625, 20655, 20685,
20715, 20745, 20775, 20805, 20835, 20865, 20895, 20925, 20955, 20985,
21015, 21045, 21075, 21105, 21135, 21165, 21195, 21225, 21255, 21285,
21315, 21345, 21375, 21405, 21435, 21465, 21495, 21525, 21555, 21585,
21615, 21645, 21675, 21705, 21735, 21765, 21795, 21825, 21855, 21885,
21915, 21945, 21975, 22005, 22035, 22065, 22095, 22125, 22155, 22185,
22215, 22245, 22275, 22305, 22335, 22365, 22395, 22425, 22455, 22485,
22515, 22545, 22575, 22605, 22635, 22665, 22695, 22725, 22755, 22785,
22815, 22845, 22875, 22905, 22935, 22965, 22995, 23025, 23055, 23085,
23115, 23145, 23175, 23205, 23235, 23265, 23295, 23325, 23355, 23385,
23415, 23445, 23475, 23505, 23535, 23565, 23595, 23625, 23655, 23685,
23715, 23745, 23775, 23805, 23835, 23865, 23895, 23925, 23955, 23985,
24015, 24045, 24075, 24105, 24135, 24165, 24195, 24225, 24255, 24285,
24315, 24345, 24375, 24405, 24435, 24465, 24495, 24525, 24555, 24585,
24615, 24645, 24675, 24705, 24735, 24765, 24795, 24825, 24855, 24885,
24915, 24945, 24975, 25005, 25035, 25065, 25095, 25125, 25155, 25185,
25215, 25245, 25275, 25305, 25335, 25365, 25395, 25425, 25455, 25485,
25515, 25545 ])
# make z-direction.
zdims=iris.coords.DimCoord(numpy.array([0]),standard_name = 'model_level_number',
units='1',attributes={'positive':'up'})
ocube.add_aux_coord(zdims)
ocube=iris.util.new_axis(ocube, zdims)
# now transpose cube to put Z 2nd
ocube.transpose([1,0,2,3])
# make coordinates 64-bit
ocube.coord(axis='x').points=ocube.coord(axis='x').points.astype(dtype='float64')
ocube.coord(axis='y').points=ocube.coord(axis='y').points.astype(dtype='float64')
#ocube.coord(axis='z').points=ocube.coord(axis='z').points.astype(dtype='float64') # integer
ocube.coord(axis='t').points=ocube.coord(axis='t').points.astype(dtype='float64')
# for some reason, longitude_bounds are double, but latitude_bounds are float
ocube.coord('latitude').bounds=ocube.coord('latitude').bounds.astype(dtype='float64')
# add forecast_period & forecast_reference_time
# forecast_reference_time
# 852 mid-month values (71 years x 12 months), step 30 days: 15, 45, ..., 25545
frt=numpy.arange(15.0, 25546.0, 30.0, dtype='float64')
frt_dims=iris.coords.AuxCoord(frt,standard_name = 'forecast_reference_time',
units=cf_units.Unit('days since 1950-01-01 00:00:00', calendar='360_day'))
ocube.add_aux_coord(frt_dims,data_dims=0)
ocube.coord('forecast_reference_time').guess_bounds()
# forecast_period
fp=numpy.array([-360],dtype='float64')
fp_dims=iris.coords.AuxCoord(fp,standard_name = 'forecast_period',
units=cf_units.Unit('hours'),bounds=numpy.array([-720,0],dtype='float64'))
ocube.add_aux_coord(fp_dims,data_dims=None)
# add-in cell_methods
ocube.cell_methods = [iris.coords.CellMethod('mean', 'time')]
# set _FillValue
fillval=1e+20
ocube.data = numpy.ma.array(data=ocube.data, fill_value=fillval, dtype='float32')
# output file name, based on species
outpath='ukca_emiss_'+species_name+'.nc'
# don't want time to be an unlimited dimension (cattable), as this is a periodic emissions file
iris.FUTURE.netcdf_no_unlimited=True
# annoying hack to set a missing_value attribute as well as a _FillValue attribute
dict.__setitem__(ocube.attributes, 'missing_value', fillval)
# now write-out to netCDF
saver = iris.fileformats.netcdf.Saver(filename=outpath, netcdf_format='NETCDF3_CLASSIC')
saver.update_global_attributes(Conventions=iris.fileformats.netcdf.CF_CONVENTIONS_VERSION)
saver.write(ocube, local_keys=['vertical_scaling', 'missing_value','um_stash_source','tracer_name','lumped_species'])
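# hedged sanity check (not part of the original script): reload the file just
# written and print a short summary; assumes iris can read outpath back in
check_cube = iris.load_cube(outpath)
print(check_cube.summary(shorten=True))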
# end of script
|
acsis-project/emissions
|
emissions/python/timeseries_1950-2020/regrid_C3H8_emissions_n96e_360d.py
|
Python
|
gpl-3.0
| 19,239
|
[
"NetCDF"
] |
939761a3d1ec3bffec49a531859ae67db44c0270553cdb9164894203b3d3c378
|
"""
tune_combat.py
Copyright 2016 Brian Romanchuk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from common.combat import Combat
from server.game import Fleet
def CreateFleet1():
f = Fleet(ID=1)
f.PlayerID=1
f.Ships = 10
f.IsDefending=False
return f
def CreateFleet2():
f = Fleet(ID=2)
f.PlayerID=2
f.Ships = 20
f.IsDefending=False
return f
def main():
print "IN"
distrib = [0,]*21
for seed in range(0,1000):
f1 = CreateFleet1()
f2 = CreateFleet2()
battle = Combat(seed, [f1, f2])
battle.HitLevel=400
battle.RunCombat(run_one_step=False)
out = battle.OutFleets[0]
#print out.PlayerID,out.Ships
if out.PlayerID == 1:
distrib[0] += 1
else:
distrib[out.Ships] += 1
idx = range(0,21)
print "*" * 20
for (i,val) in zip(idx,distrib):
print i,val
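
def sweep_hit_levels(levels=(200, 400, 600), n_seeds=1000):
    # Hedged tuning sketch (not part of the original script): reruns the same
    # Monte Carlo loop for several HitLevel settings and reports how often
    # fleet 1 survives. The level values here are illustrative assumptions.
    for level in levels:
        wins = 0
        for seed in range(0, n_seeds):
            battle = Combat(seed, [CreateFleet1(), CreateFleet2()])
            battle.HitLevel = level
            battle.RunCombat(run_one_step=False)
            # As in main(): OutFleets[0] holds the surviving fleet.
            if battle.OutFleets[0].PlayerID == 1:
                wins += 1
        print "HitLevel", level, "-> fleet 1 wins:", wins, "/", n_seeds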
if __name__ == '__main__':
main()
|
brianr747/Simple4Xpygame
|
tune_combat.py
|
Python
|
apache-2.0
| 1,484
|
[
"Brian"
] |
7a41e03ec0974036a7d89a0c62105dc6b97e8b9aecf4c8046943e7005cfcd6d0
|
#!/usr/bin/env python3
########################################################################
# Solves problem 1 from projectEuler.net.
# Sums all the multiples of 3 or 5 from 1 to 1000 (exclusive)
# Copyright (C) 2010 Santiago Alessandri
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# You can contact me at san.lt.ss@gmail.com
# Visit my wiki at http://san-ss.wikidot.com
########################################################################
if __name__ == '__main__':
result = 0
for x in range(1,1000):
        if x % 3 == 0 or x % 5 == 0:
            result += x
print("The result is:", result)
|
sanSS/programming-contests
|
project-euler/problem001.py
|
Python
|
gpl-3.0
| 1,240
|
[
"VisIt"
] |
a79ba0398f1e6dd03f92b0814d7f3eace92fe8b448d3686b6fdf8841888220c7
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 19 11:20:48 2020
@author: tommy
"""
if __name__ == "__main__":
from KDEpy import FFTKDE
import matplotlib.pyplot as plt
customer_ages = [40, 56, 20, 35, 27, 24, 29, 37, 39, 46]
# Distribution of customers
x, y = FFTKDE(kernel="gaussian", bw="silverman").fit(customer_ages).evaluate()
plt.plot(x, y)
# Distribution of customer income (weight each customer by their income)
customer_income = [152, 64, 24, 140, 88, 64, 103, 148, 150, 132]
# The `bw` parameter can be manually set, e.g. `bw=5`
x, y = FFTKDE(bw="silverman").fit(customer_ages, weights=customer_income).evaluate()
plt.plot(x, y)
if __name__ == "__main__":
plt.show() # Flush out the calls to plt.plot earlier
import os
import numpy as np
# Convert to arrays
customer_ages = np.array(customer_ages)
customer_income = np.array(customer_income)
    # Create the plot
plt.figure(figsize=(10, 2.25))
# ------------------------------------------------------------------------
ax1 = plt.subplot(121)
plt.title("Distribution of customers")
x, y = FFTKDE(bw="silverman").fit(customer_ages).evaluate()
plt.plot(x, y, zorder=10)
plt.scatter(
customer_ages, np.zeros_like(customer_ages), marker="o", color="red", zorder=10, s=np.min(customer_income / 2)
)
plt.grid(True, ls="--", zorder=-15)
plt.yticks(fontsize=0)
plt.xlabel("Age")
plt.ylabel("Probability density")
# ------------------------------------------------------------------------
ax2 = plt.subplot(122, sharex=ax1, sharey=ax1)
plt.title("Distribution of customer income")
x, y = FFTKDE(bw="silverman").fit(customer_ages, weights=customer_income).evaluate()
plt.plot(x, y, zorder=10)
plt.scatter(customer_ages, np.zeros_like(customer_ages), marker="o", color="red", zorder=10, s=customer_income / 2)
plt.grid(True, ls="--", zorder=-15)
plt.yticks(fontsize=0)
plt.xlabel("Age")
# ------------------------------------------------------------------------
# Save the figure
here = os.path.abspath(os.path.dirname(__file__))
filename = os.path.join(here, "_static/img", "README_example.png")
plt.savefig(filename, dpi=200)
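    # Hedged extra (not part of the original example): evaluate() also accepts
    # an explicit number of equidistant grid points, e.g.:
    #   x, y = FFTKDE(bw="silverman").fit(customer_ages).evaluate(2**10)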
|
tommyod/KDEpy
|
docs/source/README_examples.py
|
Python
|
gpl-3.0
| 2,299
|
[
"Gaussian"
] |
54adf2dad703a0d1aaca4ae64a978eef4154184c7d02b4bb96a269ce8fb437e0
|
from functools import partial
from kivy.properties import ObjectProperty, StringProperty, OptionProperty
from kivy.uix.accordion import AccordionItem
from kivy.uix.button import Button
from kivy.uix.gridlayout import GridLayout
from kivy.uix.modalview import ModalView
from kivy.uix.togglebutton import ToggleButton
class PlaygroundSizeSelector(Button):
'''Button to open playground size selection view
'''
view = ObjectProperty()
'''This property refers to the
:class:`~designer.uix.playground_size_selector.PlaygroundSizeView`
instance.
:data:`view` is an :class:`~kivy.properties.ObjectProperty`
'''
playground = ObjectProperty()
'''This property holds a reference to the
:class:`~designer.playground.Playground` instance.
:data:`playground` is an :class:`~kivy.properties.ObjectProperty`
'''
def on_playground(self, *_):
'''Create a
:class:`~designer.uix.playground_size_selector.PlaygroundSizeView`
for the current playground.
'''
self.view = PlaygroundSizeView(selected_size=self.playground.size)
self.view.bind(selected_size=self._update_playground)
self.view.bind(selected_size_name=self.setter('text'))
self.text = self.view.selected_size_name
def _update_playground(self, _, size):
'''Callback to update the playground size on :data:`selected_size`
changes
'''
if self.playground:
self.playground.size = size
if self.playground.root:
self.playground.root.size = size
def on_press(self):
'''Open the
:class:`~designer.uix.playground_size_selector.PlaygroundSizeView`
'''
self.view.size_hint = None, None
self.view.width = self.get_root_window().width / 2.
self.view.height = self.get_root_window().height / 2.
self.view.attach_to = self
self.view.open()
class PlaygroundSizeView(ModalView):
'''Dialog for playground size selection
'''
accordion = ObjectProperty()
'''This property holds a reference to the
:class:`~kivy.uix.accordion.Accordion` inside the dialog.
:data:`accordion` is an :class:`~kivy.properties.ObjectProperty`
'''
selected_size = ObjectProperty()
'''This property contains the currently selected playground size.
:data:`selected_size` is an :class:`~kivy.properties.ObjectProperty`
'''
selected_size_name = StringProperty('')
'''This property contains the name associated with :data:`selected_size`.
:data:`selected_size_name` is a :class:`~kivy.properties.StringProperty`
'''
selected_orientation = OptionProperty(
'landscape', options=('portrait', 'landscape')
)
'''This property contains the screen orientation for :data:`selected_size`.
:data:`selected_orientation` is an
:class:`~kivy.properties.OptionProperty`
'''
default_sizes = (
('Desktop - SD', (
('Default', (550, 350)),
('Small', (800, 600)),
('Medium', (1024, 768)),
('Large', (1280, 1024)),
('XLarge', (1600, 1200))
)),
('Desktop - HD', (
('720p', (1280, 720)),
('LVDS', (1366, 768)),
('1080p', (1920, 1080)),
('4K', (3840, 2160)),
('4K Cinema', (4096, 2160))
)),
('Generic', (
('QVGA', (320, 240)),
('WQVGA400', (400, 240)),
('WQVGA432', (432, 240)),
('HVGA', (480, 320)),
('WVGA800', (800, 480)),
('WVGA854', (854, 480)),
('1024x600', (1024, 600)),
('1024x768', (1024, 768)),
('1280x768', (1280, 768)),
('WXGA', (1280, 800)),
('640x480', (640, 480)),
('1536x1152', (1536, 1152)),
('1920x1152', (1920, 1152)),
('1920x1200', (1920, 1200)),
('960x640', (960, 640)),
('2048x1536', (2048, 1536)),
('2560x1536', (2560, 1536)),
('2560x1600', (2560, 1600)),
)),
('Android', (
('HTC One', (1920, 1080)),
('HTC One X', (1920, 720)),
('HTC One SV', (800, 480)),
('Galaxy S3', (1280, 720)),
('Galaxy Note 2', (1280, 720)),
('Motorola Droid 2', (854, 480)),
('Motorola Xoom', (1280, 800)),
('Xperia E', (480, 320)),
('Nexus 4', (1280, 768)),
('Nexus 7 (2012)', (1280, 800)),
('Nexus 7 (2013)', (1920, 1200)),
)),
('iOS', (
('iPad 1/2', (1024, 768)),
('iPad 3', (2048, 1536)),
('iPhone 4', (960, 640)),
('iPhone 5', (1136, 640)),
)),
)
'''Ordered map of default selectable sizes.
'''
def __init__(self, **kwargs):
self._buttons = {}
super(PlaygroundSizeView, self).__init__(**kwargs)
for title, values in self.default_sizes:
grid = GridLayout(rows=4)
def sort_sizes(item):
return item[1][1] * item[1][0]
values = sorted(values, key=sort_sizes, reverse=True)
for name, size in values:
btn = ToggleButton(text='', markup=True)
btntext = ('%s\n[color=777777][size=%d]%dx%d[/size][/color]' %
(name, btn.font_size * 0.8, size[0], size[1]))
btn.text = btntext
btn.bind(on_press=partial(self.set_size, size))
grid.add_widget(btn)
self._buttons[name] = btn
item = AccordionItem(title=title)
item.add_widget(grid)
self.accordion.add_widget(item)
self.accordion.select(self.accordion.children[-1])
self.update_buttons()
def find_size(self):
'''Find the size name and orientation for the current size.
'''
orientation = self.check_orientation(self.selected_size)
check_size = tuple(sorted(self.selected_size, reverse=True)).__eq__
for _, values in self.default_sizes:
for name, size in values:
if check_size(size):
return name, size, orientation
return 'Custom', self.selected_size, orientation
def check_orientation(self, size):
'''Determine if the provided size is portrait or landscape.
'''
return 'portrait' if size[1] > size[0] else 'landscape'
def update_buttons(self, size_name=None):
'''Update the toggle state of the size buttons and open the
appropriate accordion section.
'''
if not size_name:
size_name = self.find_size()[0]
for name, btn in list(self._buttons.items()):
if name == size_name:
btn.state = 'down'
self.accordion.select(btn.parent.parent.parent.parent.parent)
else:
btn.state = 'normal'
def on_selected_size(self, *_):
'''Callback to update properties on changes to :data:`selected_size`.
'''
size_info = self.find_size()
self.selected_size_name = ('%s\n[color=777777](%s, %dx%d)[/color]' %
(size_info[0], size_info[2],
size_info[1][0], size_info[1][1]))
self.selected_orientation = size_info[2]
self.update_buttons(size_info[0])
def update_size(self, size):
'''Set :data:`selected_size` while taking orientation into account.
'''
size = sorted(size, reverse=self.selected_orientation == 'landscape')
self.selected_size = size
def set_size(self, size, *_):
'''Set :data:`selected_size` and close the dialog.
'''
self.update_size(size)
self.dismiss()
def on_selected_orientation(self, _, value):
'''Callback to update size on changes to :data:`selected_orientation`.
'''
self.update_size(self.selected_size)
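# Hedged usage sketch (not part of the module): check_orientation uses no
# instance state, so it can be exercised without a running Kivy app
# (Python 3 call form shown):
#   PlaygroundSizeView.check_orientation(None, (480, 800))  # -> 'portrait'
#   PlaygroundSizeView.check_orientation(None, (800, 480))  # -> 'landscape'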
|
MiyamotoAkira/kivy-designer
|
designer/uix/playground_size_selector.py
|
Python
|
mit
| 8,095
|
[
"Galaxy"
] |
d8359bbf438abbf87956660db3a6d1d7bb533da272cd3adbca7d9478220bb74f
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A simple example demonstrating Gaussian Mixture Model (GMM).
Run with:
bin/spark-submit examples/src/main/python/ml/gaussian_mixture_example.py
"""
from __future__ import print_function
# $example on$
from pyspark.ml.clustering import GaussianMixture
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("GaussianMixtureExample")\
.getOrCreate()
# $example on$
# loads data
dataset = spark.read.format("libsvm").load("data/mllib/sample_kmeans_data.txt")
gmm = GaussianMixture().setK(2).setSeed(538009335)
model = gmm.fit(dataset)
print("Gaussians shown as a DataFrame: ")
model.gaussiansDF.show(truncate=False)
# $example off$
spark.stop()
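    # Hedged follow-up (not part of the original example): before stopping the
    # session one can also inspect the mixture weights of the fitted model:
    #   print(model.weights)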
|
lhfei/spark-in-action
|
spark-2.x/src/main/python/ml/gaussian_mixture_example.py
|
Python
|
apache-2.0
| 1,616
|
[
"Gaussian"
] |
d38ebbc090bd05aec963427affb2beb7fb94ea0eea0b2c4d253470928df504e4
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
A Flow is a container for Works, and works consist of tasks.
Flows are the final objects that can be dumped directly to a pickle file on disk
Flows are executed using abirun (abipy).
"""
from __future__ import unicode_literals, division, print_function
import os
import sys
import time
import collections
import warnings
import shutil
import copy
import tempfile
import numpy as np
from pprint import pprint
from six.moves import map, StringIO
from tabulate import tabulate
from pydispatch import dispatcher
from collections import OrderedDict
from monty.collections import as_set, dict2namedtuple
from monty.string import list_strings, is_string, make_banner
from monty.operator import operator_from_str
from monty.io import FileLock
from monty.pprint import draw_tree
from monty.termcolor import cprint, colored, cprint_map, get_terminal_size
from monty.inspect import find_top_pyfile
from monty.dev import deprecated
from monty.json import MSONable
from pymatgen.serializers.pickle_coders import pmg_pickle_load, pmg_pickle_dump
from pymatgen.serializers.json_coders import pmg_serialize
from pymatgen.core.units import Memory
from pymatgen.util.io_utils import AtomicFile
from pymatgen.util.plotting import add_fig_kwargs, get_ax_fig_plt
from . import wrappers
from .nodes import Status, Node, NodeError, NodeResults, Dependency, GarbageCollector, check_spectator
from .tasks import ScfTask, DdkTask, DdeTask, TaskManager, FixQueueCriticalError
from .utils import File, Directory, Editor
from .abiinspect import yaml_read_irred_perts
from .works import NodeContainer, Work, BandStructureWork, PhononWork, BecWork, G0W0Work, QptdmWork, DteWork
from .events import EventsParser # autodoc_event_handlers
import logging
logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__all__ = [
"Flow",
"G0W0WithQptdmFlow",
"bandstructure_flow",
"g0w0_flow",
"phonon_flow",
]
class FlowResults(NodeResults):
JSON_SCHEMA = NodeResults.JSON_SCHEMA.copy()
#JSON_SCHEMA["properties"] = {
# "queries": {"type": "string", "required": True},
#}
@classmethod
def from_node(cls, flow):
"""Initialize an instance from a Work instance."""
new = super(FlowResults, cls).from_node(flow)
# Will put all files found in outdir in GridFs
d = {os.path.basename(f): f for f in flow.outdir.list_filepaths()}
# Add the pickle file.
d["pickle"] = flow.pickle_file if flow.pickle_protocol != 0 else (flow.pickle_file, "t")
new.add_gridfs_files(**d)
return new
class FlowError(NodeError):
"""Base Exception for :class:`Node` methods"""
class Flow(Node, NodeContainer, MSONable):
"""
    This object is a container of works. Its main task is managing the
    possible inter-dependencies among the works and the creation of
    dynamic workflows that are generated by callbacks registered by the user.
.. attributes::
creation_date: String with the creation_date
pickle_protocol: Protocol for Pickle database (default: -1 i.e. latest protocol)
Important methods for constructing flows:
.. methods::
register_work: register (add) a work to the flow
        register_task: register a work that contains only this task and return the work
allocate: propagate the workdir and manager of the flow to all the registered tasks
        build: create the directory tree of the flow (work and task directories).
        build_and_pickle_dump: build the flow and save its status in the pickle file.
"""
VERSION = "0.1"
PICKLE_FNAME = "__AbinitFlow__.pickle"
Error = FlowError
Results = FlowResults
@classmethod
def from_inputs(cls, workdir, inputs, manager=None, pickle_protocol=-1, task_class=ScfTask, work_class=Work):
"""
Construct a simple flow from a list of inputs. The flow contains a single Work with
tasks whose class is given by task_class.
.. warning::
Don't use this interface if you have dependencies among the tasks.
Args:
workdir: String specifying the directory where the works will be produced.
inputs: List of inputs.
manager: :class:`TaskManager` object responsible for the submission of the jobs.
If manager is None, the object is initialized from the yaml file
located either in the working directory or in the user configuration dir.
pickle_protocol: Pickle protocol version used for saving the status of the object.
-1 denotes the latest version supported by the python interpreter.
task_class: The class of the :class:`Task`.
work_class: The class of the :class:`Work`.
"""
if not isinstance(inputs, (list, tuple)): inputs = [inputs]
flow = cls(workdir, manager=manager, pickle_protocol=pickle_protocol)
work = work_class()
for inp in inputs:
work.register(inp, task_class=task_class)
flow.register_work(work)
return flow.allocate()
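    # Hedged usage sketch (not part of the module), assuming `inp1` and `inp2`
    # are AbinitInput objects prepared elsewhere:
    #   flow = Flow.from_inputs("flow_scf", inputs=[inp1, inp2])
    #   flow.build_and_pickle_dump()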
@classmethod
def as_flow(cls, obj):
"""Convert obj into a Flow. Accepts filepath, dict, or Flow object."""
if isinstance(obj, cls): return obj
if is_string(obj):
return cls.pickle_load(obj)
elif isinstance(obj, collections.Mapping):
return cls.from_dict(obj)
else:
raise TypeError("Don't know how to convert type %s into a Flow" % type(obj))
def __init__(self, workdir, manager=None, pickle_protocol=-1, remove=False):
"""
Args:
workdir: String specifying the directory where the works will be produced.
if workdir is None, the initialization of the working directory
is performed by flow.allocate(workdir).
manager: :class:`TaskManager` object responsible for the submission of the jobs.
If manager is None, the object is initialized from the yaml file
located either in the working directory or in the user configuration dir.
pickle_protocol: Pickle protocol version used for saving the status of the object.
-1 denotes the latest version supported by the python interpreter.
remove: attempt to remove working directory `workdir` if directory already exists.
"""
super(Flow, self).__init__()
if workdir is not None:
if remove and os.path.exists(workdir): shutil.rmtree(workdir)
self.set_workdir(workdir)
self.creation_date = time.asctime()
if manager is None: manager = TaskManager.from_user_config()
self.manager = manager.deepcopy()
# List of works.
self._works = []
self._waited = 0
# List of callbacks that must be executed when the dependencies reach S_OK
self._callbacks = []
# Install default list of handlers at the flow level.
# Users can override the default list by calling flow.install_event_handlers in the script.
# Example:
#
# # flow level (common case)
# flow.install_event_handlers(handlers=my_handlers)
#
# # task level (advanced mode)
# flow[0][0].install_event_handlers(handlers=my_handlers)
#
self.install_event_handlers()
self.pickle_protocol = int(pickle_protocol)
# ID used to access mongodb
self._mongo_id = None
# Save the location of the script used to generate the flow.
# This trick won't work if we are running with nosetests, py.test etc
pyfile = find_top_pyfile()
if "python" in pyfile or "ipython" in pyfile: pyfile = "<" + pyfile + ">"
self.set_pyfile(pyfile)
# TODO
# Signal slots: a dictionary with the list
# of callbacks indexed by node_id and SIGNAL_TYPE.
# When the node changes its status, it broadcast a signal.
# The flow is listening to all the nodes of the calculation
# [node_id][SIGNAL] = list_of_signal_handlers
#self._sig_slots = slots = {}
#for work in self:
# slots[work] = {s: [] for s in work.S_ALL}
#for task in self.iflat_tasks():
# slots[task] = {s: [] for s in work.S_ALL}
@pmg_serialize
def as_dict(self, **kwargs):
"""
JSON serialization, note that we only need to save
a string with the working directory since the object will be
reconstructed from the pickle file located in workdir
"""
return {"workdir": self.workdir}
# This is needed for fireworks.
to_dict = as_dict
@classmethod
def from_dict(cls, d, **kwargs):
"""Reconstruct the flow from the pickle file."""
return cls.pickle_load(d["workdir"], **kwargs)
@classmethod
def temporary_flow(cls, manager=None):
"""Return a Flow in a temporary directory. Useful for unit tests."""
return cls(workdir=tempfile.mkdtemp(), manager=manager)
def set_workdir(self, workdir, chroot=False):
"""
Set the working directory. Cannot be set more than once unless chroot is True
"""
if not chroot and hasattr(self, "workdir") and self.workdir != workdir:
raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir))
# Directories with (input|output|temporary) data.
self.workdir = os.path.abspath(workdir)
self.indir = Directory(os.path.join(self.workdir, "indata"))
self.outdir = Directory(os.path.join(self.workdir, "outdata"))
self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata"))
def reload(self):
"""
Reload the flow from the pickle file. Used when we are monitoring the flow
executed by the scheduler. In this case, indeed, the flow might have been changed
by the scheduler and we have to reload the new flow in memory.
"""
        new = self.__class__.pickle_load(self.workdir)
        # Rebinding the local name `self` would be a no-op for the caller;
        # copy the reloaded state onto this instance instead.
        self.__dict__.update(new.__dict__)
@classmethod
def pickle_load(cls, filepath, spectator_mode=True, remove_lock=False):
"""
Loads the object from a pickle file and performs initial setup.
Args:
            filepath: Filename or directory name. If filepath is a directory, we
scan the directory tree starting from filepath and we
read the first pickle database. Raise RuntimeError if multiple
databases are found.
spectator_mode: If True, the nodes of the flow are not connected by signals.
This option is usually used when we want to read a flow
in read-only mode and we want to avoid callbacks that can change the flow.
remove_lock:
True to remove the file lock if any (use it carefully).
"""
if os.path.isdir(filepath):
# Walk through each directory inside path and find the pickle database.
for dirpath, dirnames, filenames in os.walk(filepath):
fnames = [f for f in filenames if f == cls.PICKLE_FNAME]
if fnames:
if len(fnames) == 1:
filepath = os.path.join(dirpath, fnames[0])
break # Exit os.walk
else:
err_msg = "Found multiple databases:\n %s" % str(fnames)
raise RuntimeError(err_msg)
else:
err_msg = "Cannot find %s inside directory %s" % (cls.PICKLE_FNAME, filepath)
raise ValueError(err_msg)
if remove_lock and os.path.exists(filepath + ".lock"):
try:
os.remove(filepath + ".lock")
            except OSError:
                pass
with FileLock(filepath):
with open(filepath, "rb") as fh:
flow = pmg_pickle_load(fh)
# Check if versions match.
if flow.VERSION != cls.VERSION:
msg = ("File flow version %s != latest version %s\n."
"Regenerate the flow to solve the problem " % (flow.VERSION, cls.VERSION))
warnings.warn(msg)
flow.set_spectator_mode(spectator_mode)
# Recompute the status of each task since tasks that
# have been submitted previously might be completed.
flow.check_status()
return flow
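    # Hedged usage sketch (not part of the module): reload a flow from its
    # working directory (a hypothetical "flow_scf") in read-only mode:
    #   flow = Flow.pickle_load("flow_scf", spectator_mode=True)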
@classmethod
def pickle_loads(cls, s):
"""Reconstruct the flow from a string."""
strio = StringIO()
strio.write(s)
strio.seek(0)
flow = pmg_pickle_load(strio)
return flow
def __len__(self):
return len(self.works)
def __iter__(self):
return self.works.__iter__()
    def __getitem__(self, key):
        return self.works[key]
def set_pyfile(self, pyfile):
"""
Set the path of the python script used to generate the flow.
.. Example:
flow.set_pyfile(__file__)
"""
# TODO: Could use a frame hack to get the caller outside abinit
# so that pyfile is automatically set when we __init__ it!
self._pyfile = os.path.abspath(pyfile)
@property
def pyfile(self):
"""
Absolute path of the python script used to generate the flow. Set by `set_pyfile`
"""
try:
return self._pyfile
except AttributeError:
return None
@property
def pid_file(self):
"""The path of the pid file created by PyFlowScheduler."""
return os.path.join(self.workdir, "_PyFlowScheduler.pid")
def check_pid_file(self):
"""
This function checks if we are already running the :class:`Flow` with a :class:`PyFlowScheduler`.
        Raises: Flow.Error if the pid file of the scheduler exists.
"""
if not os.path.exists(self.pid_file):
return 0
self.show_status()
raise self.Error("""\n\
pid_file
%s
already exists. There are two possibilities:
   1) There's another instance of PyFlowScheduler running
2) The previous scheduler didn't exit in a clean way
To solve case 1:
Kill the previous scheduler (use 'kill pid' where pid is the number reported in the file)
Then you can restart the new scheduler.
To solve case 2:
Remove the pid_file and restart the scheduler.
Exiting""" % self.pid_file)
@property
def pickle_file(self):
"""The path of the pickle file."""
return os.path.join(self.workdir, self.PICKLE_FNAME)
@property
def mongo_id(self):
return self._mongo_id
@mongo_id.setter
def mongo_id(self, value):
if self.mongo_id is not None:
raise RuntimeError("Cannot change mongo_id %s" % self.mongo_id)
self._mongo_id = value
def mongodb_upload(self, **kwargs):
from abiflows.core.scheduler import FlowUploader
FlowUploader().upload(self, **kwargs)
def validate_json_schema(self):
"""Validate the JSON schema. Return list of errors."""
errors = []
for work in self:
for task in work:
if not task.get_results().validate_json_schema():
errors.append(task)
if not work.get_results().validate_json_schema():
errors.append(work)
if not self.get_results().validate_json_schema():
errors.append(self)
return errors
def get_mongo_info(self):
"""
Return a JSON dictionary with information on the flow.
Mainly used for constructing the info section in `FlowEntry`.
The default implementation is empty. Subclasses must implement it
"""
return {}
def mongo_assimilate(self):
"""
This function is called by client code when the flow is completed
Return a JSON dictionary with the most important results produced
by the flow. The default implementation is empty. Subclasses must implement it
"""
return {}
@property
def works(self):
"""List of :class:`Work` objects contained in self.."""
return self._works
@property
def all_ok(self):
"""True if all the tasks in works have reached `S_OK`."""
return all(work.all_ok for work in self)
@property
def num_tasks(self):
"""Total number of tasks"""
return len(list(self.iflat_tasks()))
@property
def errored_tasks(self):
"""List of errored tasks."""
etasks = []
for status in [self.S_ERROR, self.S_QCRITICAL, self.S_ABICRITICAL]:
etasks.extend(list(self.iflat_tasks(status=status)))
return set(etasks)
@property
def num_errored_tasks(self):
"""The number of tasks whose status is `S_ERROR`."""
return len(self.errored_tasks)
@property
def unconverged_tasks(self):
"""List of unconverged tasks."""
return list(self.iflat_tasks(status=self.S_UNCONVERGED))
@property
def num_unconverged_tasks(self):
"""The number of tasks whose status is `S_UNCONVERGED`."""
return len(self.unconverged_tasks)
@property
def status_counter(self):
"""
Returns a :class:`Counter` object that counts the number of tasks with
given status (use the string representation of the status as key).
"""
# Count the number of tasks with given status in each work.
counter = self[0].status_counter
for work in self[1:]:
counter += work.status_counter
return counter
@property
def ncores_reserved(self):
"""
        Returns the number of cores reserved at this moment.
A core is reserved if the task is not running but
we have submitted the task to the queue manager.
"""
return sum(work.ncores_reserved for work in self)
@property
def ncores_allocated(self):
"""
        Returns the number of cores allocated at this moment.
A core is allocated if it's running a task or if we have
submitted a task to the queue manager but the job is still pending.
"""
return sum(work.ncores_allocated for work in self)
@property
def ncores_used(self):
"""
        Returns the number of cores used at this moment.
A core is used if there's a job that is running on it.
"""
return sum(work.ncores_used for work in self)
@property
def has_chrooted(self):
"""
        Returns a string that evaluates to True if we have changed
        the workdir for visualization purposes, e.g. we are using sshfs
        to mount the remote directory where the `Flow` is located.
The string gives the previous workdir of the flow.
"""
try:
return self._chrooted_from
except AttributeError:
return ""
def chroot(self, new_workdir):
"""
        Change the workdir of the :class:`Flow`. Mainly used for
allowing the user to open the GUI on the local host
and access the flow from remote via sshfs.
.. note::
Calling this method will make the flow go in read-only mode.
"""
self._chrooted_from = self.workdir
self.set_workdir(new_workdir, chroot=True)
for i, work in enumerate(self):
new_wdir = os.path.join(self.workdir, "w" + str(i))
work.chroot(new_wdir)
def groupby_status(self):
"""
        Returns an ordered dictionary mapping the task status to
the list of named tuples (task, work_index, task_index).
"""
Entry = collections.namedtuple("Entry", "task wi ti")
d = collections.defaultdict(list)
for task, wi, ti in self.iflat_tasks_wti():
d[task.status].append(Entry(task, wi, ti))
# Sort keys according to their status.
return OrderedDict([(k, d[k]) for k in sorted(list(d.keys()))])
def groupby_task_class(self):
"""
Returns a dictionary mapping the task class to the list of tasks in the flow
"""
# Find all Task classes
class2tasks = OrderedDict()
for task in self.iflat_tasks():
cls = task.__class__
if cls not in class2tasks: class2tasks[cls] = []
class2tasks[cls].append(task)
return class2tasks
def iflat_nodes(self, status=None, op="==", nids=None):
"""
        Generator that produces a flat sequence of nodes.
        If status is not None, only the nodes with the specified status are selected.
nids is an optional list of node identifiers used to filter the nodes.
"""
nids = as_set(nids)
if status is None:
if not (nids and self.node_id not in nids):
yield self
for work in self:
if nids and work.node_id not in nids: continue
yield work
for task in work:
if nids and task.node_id not in nids: continue
yield task
else:
# Get the operator from the string.
op = operator_from_str(op)
# Accept Task.S_FLAG or string.
status = Status.as_status(status)
if not (nids and self.node_id not in nids):
if op(self.status, status): yield self
for wi, work in enumerate(self):
if nids and work.node_id not in nids: continue
if op(work.status, status): yield work
for ti, task in enumerate(work):
if nids and task.node_id not in nids: continue
if op(task.status, status): yield task
def node_from_nid(self, nid):
"""Return the node in the `Flow` with the given `nid` identifier"""
for node in self.iflat_nodes():
if node.node_id == nid: return node
raise ValueError("Cannot find node with node id: %s" % nid)
def iflat_tasks_wti(self, status=None, op="==", nids=None):
"""
Generator to iterate over all the tasks of the `Flow`.
Yields:
(task, work_index, task_index)
If status is not None, only the tasks whose status satisfies
        the condition (task.status op status) are selected.
status can be either one of the flags defined in the :class:`Task` class
(e.g Task.S_OK) or a string e.g "S_OK"
nids is an optional list of node identifiers used to filter the tasks.
"""
return self._iflat_tasks_wti(status=status, op=op, nids=nids, with_wti=True)
def iflat_tasks(self, status=None, op="==", nids=None):
"""
Generator to iterate over all the tasks of the :class:`Flow`.
If status is not None, only the tasks whose status satisfies
        the condition (task.status op status) are selected.
status can be either one of the flags defined in the :class:`Task` class
(e.g Task.S_OK) or a string e.g "S_OK"
nids is an optional list of node identifiers used to filter the tasks.
"""
return self._iflat_tasks_wti(status=status, op=op, nids=nids, with_wti=False)
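    # Hedged usage sketch (not part of the module): loop over the errored
    # tasks only, using the string form of the status flag:
    #   for task in flow.iflat_tasks(status="S_ERROR"):
    #       print(task)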
def _iflat_tasks_wti(self, status=None, op="==", nids=None, with_wti=True):
"""
        Generator that produces a flat sequence of tasks.
        If status is not None, only the tasks with the specified status are selected.
nids is an optional list of node identifiers used to filter the tasks.
Returns:
(task, work_index, task_index) if with_wti is True else task
"""
nids = as_set(nids)
if status is None:
for wi, work in enumerate(self):
for ti, task in enumerate(work):
if nids and task.node_id not in nids: continue
if with_wti:
yield task, wi, ti
else:
yield task
else:
# Get the operator from the string.
op = operator_from_str(op)
# Accept Task.S_FLAG or string.
status = Status.as_status(status)
for wi, work in enumerate(self):
for ti, task in enumerate(work):
if nids and task.node_id not in nids: continue
if op(task.status, status):
if with_wti:
yield task, wi, ti
else:
yield task
@deprecated(message="show_inpvars will be removed in pymatgen 4.0. Use show_inputs")
def show_inpvars(self, *varnames):
from abipy.htc.variable import InputVariable
lines = []
app = lines.append
for task in self.iflat_tasks():
app(str(task))
for name in varnames:
value = task.input.get(name)
app(str(InputVariable(name, value)))
return "\n".join(lines)
def abivalidate_inputs(self):
"""
Run ABINIT in dry mode to validate all the inputs of the flow.
Return:
(isok, tuples)
isok is True if all inputs are ok.
tuples is List of `namedtuple` objects, one for each task in the flow.
Each namedtuple has the following attributes:
retcode: Return code. 0 if OK.
log_file: log file of the Abinit run, use log_file.read() to access its content.
stderr_file: stderr file of the Abinit run. use stderr_file.read() to access its content.
Raises:
`RuntimeError` if executable is not in $PATH.
"""
if not self.allocated:
self.build()
#self.build_and_pickle_dump()
isok, tuples = True, []
for task in self.iflat_tasks():
t = task.input.abivalidate()
if t.retcode != 0: isok = False
tuples.append(t)
return isok, tuples
def check_dependencies(self):
"""Test the dependencies of the nodes for possible deadlocks."""
deadlocks = []
for task in self.iflat_tasks():
for dep in task.deps:
if dep.node.depends_on(task):
deadlocks.append((task, dep.node))
if deadlocks:
lines = ["Detect wrong list of dependecies that will lead to a deadlock:"]
lines.extend(["%s <--> %s" % nodes for nodes in deadlocks])
raise RuntimeError("\n".join(lines))
def find_deadlocks(self):
"""
This function detects deadlocks
Return:
            named tuple with the tasks grouped in: deadlocked, runnables, running
"""
        # Find jobs that can be submitted and the jobs that are already in the queue.
runnables = []
for work in self:
runnables.extend(work.fetch_alltasks_to_run())
runnables.extend(list(self.iflat_tasks(status=self.S_SUB)))
# Running jobs.
running = list(self.iflat_tasks(status=self.S_RUN))
# Find deadlocks.
err_tasks = self.errored_tasks
deadlocked = []
if err_tasks:
for task in self.iflat_tasks():
if any(task.depends_on(err_task) for err_task in err_tasks):
deadlocked.append(task)
return dict2namedtuple(deadlocked=deadlocked, runnables=runnables, running=running)
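    # Hedged usage sketch (not part of the module): the returned namedtuple
    # exposes the groups as attributes:
    #   d = flow.find_deadlocks()
    #   print(len(d.deadlocked), len(d.runnables), len(d.running))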
def check_status(self, **kwargs):
"""
Check the status of the works in self.
Args:
show: True to show the status of the flow.
kwargs: keyword arguments passed to show_status
"""
for work in self:
work.check_status()
if kwargs.pop("show", False):
self.show_status(**kwargs)
@property
def status(self):
"""The status of the :class:`Flow` i.e. the minimum of the status of its tasks and its works"""
return min(work.get_all_status(only_min=True) for work in self)
#def restart_unconverged_tasks(self, max_nlauch, excs):
# nlaunch = 0
# for task in self.unconverged_tasks:
# try:
# logger.info("Flow will try restart task %s" % task)
# fired = task.restart()
# if fired:
# nlaunch += 1
# max_nlaunch -= 1
# if max_nlaunch == 0:
# logger.info("Restart: too many jobs in the queue, returning")
# self.pickle_dump()
# return nlaunch, max_nlaunch
# except task.RestartError:
# excs.append(straceback())
# return nlaunch, max_nlaunch
def fix_abicritical(self):
"""
This function tries to fix critical events originating from ABINIT.
Returns the number of tasks that have been fixed.
"""
count = 0
for task in self.iflat_tasks(status=self.S_ABICRITICAL):
count += task.fix_abicritical()
return count
def fix_queue_critical(self):
"""
This function tries to fix critical events originating from the queue submission system.
Returns the number of tasks that have been fixed.
"""
count = 0
for task in self.iflat_tasks(status=self.S_QCRITICAL):
logger.info("Will try to fix task %s" % str(task))
try:
print(task.fix_queue_critical())
count += 1
except FixQueueCriticalError:
logger.info("Not able to fix task %s" % task)
return count
def show_info(self, **kwargs):
"""
Print info on the flow i.e. total number of tasks, works, tasks grouped by class.
Example:
Task Class Number
------------ --------
ScfTask 1
NscfTask 1
ScrTask 2
SigmaTask 6
"""
stream = kwargs.pop("stream", sys.stdout)
lines = [str(self)]
app = lines.append
app("Number of works: %d, total number of tasks: %s" % (len(self), self.num_tasks) )
app("Number of tasks with a given class:")
# Build Table
data = [[cls.__name__, len(tasks)]
for cls, tasks in self.groupby_task_class().items()]
app(str(tabulate(data, headers=["Task Class", "Number"])))
stream.write("\n".join(lines))
def show_summary(self, **kwargs):
"""
Print a short summary with the status of the flow and a counter task_status --> number_of_tasks
Args:
stream: File-like object, Default: sys.stdout
Example:
Status Count
--------- -------
Completed 10
<Flow, node_id=27163, workdir=flow_gwconv_ecuteps>, num_tasks=10, all_ok=True
"""
stream = kwargs.pop("stream", sys.stdout)
stream.write("\n")
table = list(self.status_counter.items())
s = tabulate(table, headers=["Status", "Count"])
stream.write(s + "\n")
stream.write("\n")
stream.write("%s, num_tasks=%s, all_ok=%s\n" % (str(self), self.num_tasks, self.all_ok))
stream.write("\n")
def show_status(self, **kwargs):
"""
Report the status of the works and the status of the different tasks on the specified stream.
Args:
stream: File-like object, Default: sys.stdout
nids: List of node identifiers. By defaults all nodes are shown
wslice: Slice object used to select works.
verbose: Verbosity level (default 0). > 0 to show only the works that are not finalized.
"""
stream = kwargs.pop("stream", sys.stdout)
nids = as_set(kwargs.pop("nids", None))
wslice = kwargs.pop("wslice", None)
verbose = kwargs.pop("verbose", 0)
wlist = None
if wslice is not None:
# Convert range to list of work indices.
            wlist = list(range(wslice.start, wslice.stop, wslice.step or 1))
#has_colours = stream_has_colours(stream)
has_colours = True
red = "red" if has_colours else None
for i, work in enumerate(self):
if nids and work.node_id not in nids: continue
print("", file=stream)
cprint_map("Work #%d: %s, Finalized=%s" % (i, work, work.finalized), cmap={"True": "green"}, file=stream)
            if wlist is not None and i not in wlist: continue
if verbose == 0 and work.finalized:
print(" Finalized works are not shown. Use verbose > 0 to force output.", file=stream)
continue
headers = ["Task", "Status", "Queue", "MPI|Omp|Gb",
"Warn|Com", "Class", "Sub|Rest|Corr", "Time",
"Node_ID"]
table = []
tot_num_errors = 0
for task in work:
if nids and task.node_id not in nids: continue
task_name = os.path.basename(task.name)
# FIXME: This should not be done here.
# get_event_report should be called only in check_status
# Parse the events in the main output.
report = task.get_event_report()
# Get time info (run-time or time in queue or None)
stime = None
timedelta = task.datetimes.get_runtime()
if timedelta is not None:
stime = str(timedelta) + "R"
else:
timedelta = task.datetimes.get_time_inqueue()
if timedelta is not None:
stime = str(timedelta) + "Q"
events = "|".join(2*["NA"])
if report is not None:
events = '{:>4}|{:>3}'.format(*map(str, (
report.num_warnings, report.num_comments)))
para_info = '{:>4}|{:>3}|{:>3}'.format(*map(str, (
task.mpi_procs, task.omp_threads, "%.1f" % task.mem_per_proc.to("Gb"))))
task_info = list(map(str, [task.__class__.__name__,
(task.num_launches, task.num_restarts, task.num_corrections), stime, task.node_id]))
qinfo = "None"
if task.queue_id is not None:
qinfo = str(task.queue_id) + "@" + str(task.qname)
if task.status.is_critical:
tot_num_errors += 1
task_name = colored(task_name, red)
if has_colours:
table.append([task_name, task.status.colored, qinfo,
para_info, events] + task_info)
else:
                    table.append([task_name, str(task.status), qinfo,
                                  para_info, events] + task_info)
# Print table and write colorized line with the total number of errors.
print(tabulate(table, headers=headers, tablefmt="grid"), file=stream)
if tot_num_errors:
cprint("Total number of errors: %d" % tot_num_errors, "red", file=stream)
print("", file=stream)
if self.all_ok:
cprint("\nall_ok reached\n", "green", file=stream)
def show_events(self, status=None, nids=None):
"""
        Print the Abinit events (ERRORS, WARNINGS, COMMENTS) to stdout.
Args:
            status: if not None, only the tasks with this status are selected
nids: optional list of node identifiers used to filter the tasks.
"""
nrows, ncols = get_terminal_size()
for task in self.iflat_tasks(status=status, nids=nids):
report = task.get_event_report()
if report:
print(make_banner(str(task), width=ncols, mark="="))
print(report)
#report = report.filter_types()
def show_corrections(self, status=None, nids=None):
"""
Show the corrections applied to the flow at run-time.
Args:
            status: if not None, only the tasks with this status are selected.
nids: optional list of node identifiers used to filter the tasks.
Return: The number of corrections found.
"""
nrows, ncols = get_terminal_size()
count = 0
for task in self.iflat_tasks(status=status, nids=nids):
if task.num_corrections == 0: continue
count += 1
print(make_banner(str(task), width=ncols, mark="="))
for corr in task.corrections:
pprint(corr)
if not count: print("No correction found.")
return count
def show_history(self, status=None, nids=None, full_history=False, metadata=False):
"""
Print the history of the flow to stdout.
Args:
            status: if not None, only the tasks with this status are selected
full_history: Print full info set, including nodes with an empty history.
nids: optional list of node identifiers used to filter the tasks.
metadata: print history metadata (experimental)
"""
nrows, ncols = get_terminal_size()
works_done = []
        # Loop over the tasks and show the history of each work not already in works_done.
for task in self.iflat_tasks(status=status, nids=nids):
work = task.work
if work not in works_done:
works_done.append(work)
if work.history or full_history:
cprint(make_banner(str(work), width=ncols, mark="="), **work.status.color_opts)
print(work.history.to_string(metadata=metadata))
if task.history or full_history:
cprint(make_banner(str(task), width=ncols, mark="="), **task.status.color_opts)
print(task.history.to_string(metadata=metadata))
# Print the history of the flow.
if self.history or full_history:
cprint(make_banner(str(self), width=ncols, mark="="), **self.status.color_opts)
print(self.history.to_string(metadata=metadata))
def show_inputs(self, varnames=None, nids=None, wslice=None, stream=sys.stdout):
"""
Print the input of the tasks to the given stream.
Args:
varnames:
List of Abinit variables. If not None, only the variable in varnames
are selected and printed.
nids:
List of node identifiers. By defaults all nodes are shown
wslice:
Slice object used to select works.
stream:
File-like object, Default: sys.stdout
"""
if varnames is not None:
# Build dictionary varname --> [(task1, value), (task2, value), ...]
varnames = [s.strip() for s in list_strings(varnames)]
dlist = collections.defaultdict(list)
for task in self.select_tasks(nids=nids, wslice=wslice):
dstruct = task.input.structure.as_dict(fmt="abivars")
for vname in varnames:
value = task.input.get(vname, None)
if value is None: # maybe in structure?
value = dstruct.get(vname, None)
if value is not None:
dlist[vname].append((task, value))
for vname in varnames:
tv_list = dlist[vname]
if not tv_list:
stream.write("[%s]: Found 0 tasks with this variable\n" % vname)
else:
stream.write("[%s]: Found %s tasks with this variable\n" % (vname, len(tv_list)))
for i, (task, value) in enumerate(tv_list):
stream.write(" %s --> %s\n" % (str(value), task))
stream.write("\n")
else:
lines = []
for task in self.select_tasks(nids=nids, wslice=wslice):
s = task.make_input(with_header=True)
# Add info on dependencies.
if task.deps:
s += "\n\nDependencies:\n" + "\n".join(str(dep) for dep in task.deps)
else:
s += "\n\nDependencies: None"
lines.append(2*"\n" + 80 * "=" + "\n" + s + 2*"\n")
stream.writelines(lines)
def listext(self, ext, stream=sys.stdout):
"""
Print to the given `stream` a table with the list of the output files
with the given `ext` produced by the flow.
"""
nodes_files = []
for node in self.iflat_nodes():
filepath = node.outdir.has_abiext(ext)
if filepath:
nodes_files.append((node, File(filepath)))
if nodes_files:
print("Found %s files with extension %s produced by the flow" % (len(nodes_files), ext), file=stream)
table = [[f.relpath, "%.2f" % (f.get_stat().st_size / 1024**2),
node.node_id, node.__class__.__name__]
for node, f in nodes_files]
print(tabulate(table, headers=["File", "Size [Mb]", "Node_ID", "Node Class"]), file=stream)
else:
print("No output file with extension %s has been produced by the flow" % ext, file=stream)
def select_tasks(self, nids=None, wslice=None):
"""
Return a list with a subset of tasks.
Args:
nids: List of node identifiers.
wslice: Slice object used to select works.
.. note::
nids and wslice are mutually exclusive.
If no argument is provided, the full list of tasks is returned.
"""
if nids is not None:
assert wslice is None
tasks = self.tasks_from_nids(nids)
elif wslice is not None:
tasks = []
for work in self[wslice]:
tasks.extend([t for t in work])
else:
# All tasks selected if no option is provided.
tasks = list(self.iflat_tasks())
return tasks
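    # Hedged usage sketch (not part of the module): select the tasks of the
    # first two works with a slice object:
    #   tasks = flow.select_tasks(wslice=slice(0, 2))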
def inspect(self, nids=None, wslice=None, **kwargs):
"""
Inspect the tasks (SCF iterations, Structural relaxation ...) and
produces matplotlib plots.
Args:
nids: List of node identifiers.
wslice: Slice object used to select works.
kwargs: keyword arguments passed to `task.inspect` method.
.. note::
nids and wslice are mutually exclusive.
If nids and wslice are both None, all tasks in self are inspected.
Returns:
List of `matplotlib` figures.
"""
figs = []
for task in self.select_tasks(nids=nids, wslice=wslice):
if hasattr(task, "inspect"):
fig = task.inspect(**kwargs)
if fig is None:
cprint("Cannot inspect Task %s" % task, color="blue")
else:
figs.append(fig)
else:
cprint("Task %s does not provide an inspect method" % task, color="blue")
return figs
def get_results(self, **kwargs):
results = self.Results.from_node(self)
results.update(self.get_dict_for_mongodb_queries())
return results
def get_dict_for_mongodb_queries(self):
"""
This function returns a dictionary with the attributes that will be
put in the mongodb document to facilitate the query.
Subclasses may want to replace or extend the default behaviour.
"""
d = {}
return d
        # TODO: unreachable sketch kept from the original (dead code after return):
        #all_structures = [task.input.structure for task in self.iflat_tasks()]
        #all_pseudos = [task.input.pseudos for task in self.iflat_tasks()]
def look_before_you_leap(self):
"""
This method should be called before running the calculation to make
sure that the most important requirements are satisfied.
        Return:
            String with the inconsistencies/errors found (empty if none).
"""
errors = []
try:
self.check_dependencies()
except self.Error as exc:
errors.append(str(exc))
if self.has_db:
try:
self.manager.db_connector.get_collection()
except Exception as exc:
errors.append("""
ERROR while trying to connect to the MongoDB database:
Exception:
%s
Connector:
%s
""" % (exc, self.manager.db_connector))
return "\n".join(errors)
@property
def has_db(self):
"""True if flow uses `MongoDB` to store the results."""
return self.manager.has_db
def db_insert(self):
"""
        Insert results in the `MongoDB` database.
"""
assert self.has_db
# Connect to MongoDb and get the collection.
coll = self.manager.db_connector.get_collection()
print("Mongodb collection %s with count %d", coll, coll.count())
start = time.time()
for work in self:
for task in work:
results = task.get_results()
pprint(results)
results.update_collection(coll)
results = work.get_results()
pprint(results)
results.update_collection(coll)
print("MongoDb update done in %s [s]" % time.time() - start)
results = self.get_results()
pprint(results)
results.update_collection(coll)
# Update the pickle file to save the mongo ids.
self.pickle_dump()
for d in coll.find():
pprint(d)
def tasks_from_nids(self, nids):
"""
Return the list of tasks associated to the given list of node identifiers (nids).
.. note::
Invalid ids are ignored
"""
if not isinstance(nids, collections.Iterable): nids = [nids]
tasks = []
for nid in nids:
for task in self.iflat_tasks():
if task.node_id == nid:
tasks.append(task)
break
return tasks
def wti_from_nids(self, nids):
"""Return the list of (w, t) indices from the list of node identifiers nids."""
return [task.pos for task in self.tasks_from_nids(nids)]
def open_files(self, what="o", status=None, op="==", nids=None, editor=None):
"""
Open the files of the flow inside an editor (command line interface).
Args:
what: string with the list of characters selecting the file type
Possible choices:
i ==> input_file,
o ==> output_file,
f ==> files_file,
j ==> job_file,
l ==> log_file,
e ==> stderr_file,
q ==> qout_file,
all ==> all files.
            status: if not None, only the tasks with this status are selected
op: status operator. Requires status. A task is selected
if task.status op status evaluates to true.
nids: optional list of node identifiers used to filter the tasks.
editor: Select the editor. None to use the default editor ($EDITOR shell env var)
"""
# Build list of files to analyze.
files = []
for task in self.iflat_tasks(status=status, op=op, nids=nids):
lst = task.select_files(what)
if lst:
files.extend(lst)
return Editor(editor=editor).edit_files(files)
def parse_timing(self, nids=None):
"""
Parse the timer data in the main output file(s) of Abinit.
Requires timopt /= 0 in the input file (usually timopt = -1)
Args:
nids: optional list of node identifiers used to filter the tasks.
Return: :class:`AbinitTimerParser` instance, None if error.
"""
# Get the list of output files according to nids.
paths = [task.output_file.path for task in self.iflat_tasks(nids=nids)]
# Parse data.
from .abitimer import AbinitTimerParser
parser = AbinitTimerParser()
read_ok = parser.parse(paths)
if read_ok:
return parser
return None
def show_abierrors(self, nids=None, stream=sys.stdout):
"""
Write to the given stream the list of ABINIT errors for all tasks whose status is S_ABICRITICAL.
Args:
nids: optional list of node identifiers used to filter the tasks.
stream: File-like object. Default: sys.stdout
"""
lines = []
app = lines.append
for task in self.iflat_tasks(status=self.S_ABICRITICAL, nids=nids):
header = "=== " + task.qout_file.path + "==="
app(header)
report = task.get_event_report()
if report is not None:
app("num_errors: %s, num_warnings: %s, num_comments: %s" % (
report.num_errors, report.num_warnings, report.num_comments))
app("*** ERRORS ***")
app("\n".join(str(e) for e in report.errors))
app("*** BUGS ***")
app("\n".join(str(b) for b in report.bugs))
else:
app("get_envent_report returned None!")
app("=" * len(header) + 2*"\n")
return stream.writelines(lines)
def show_qouts(self, nids=None, stream=sys.stdout):
"""
Write to the given stream the content of the queue output file for all tasks whose status is S_QCRITICAL.
Args:
nids: optional list of node identifiers used to filter the tasks.
stream: File-like object. Default: sys.stdout
"""
lines = []
for task in self.iflat_tasks(status=self.S_QCRITICAL, nids=nids):
header = "=== " + task.qout_file.path + "==="
lines.append(header)
if task.qout_file.exists:
with open(task.qout_file.path, "rt") as fh:
lines += fh.readlines()
else:
lines.append("File does not exist!")
lines.append("=" * len(header) + 2*"\n")
return stream.writelines(lines)
def debug(self, status=None, nids=None):
"""
        This method is usually used when the flow didn't complete successfully.
        It analyzes the files produced by the tasks to facilitate debugging.
        Info is printed to stdout.
Args:
status: If not None, only the tasks with this status are selected
nids: optional list of node identifiers used to filter the tasks.
"""
nrows, ncols = get_terminal_size()
# Test for scheduler exceptions first.
sched_excfile = os.path.join(self.workdir, "_exceptions")
if os.path.exists(sched_excfile):
with open(sched_excfile, "r") as fh:
cprint("Found exceptions raised by the scheduler", "red")
cprint(fh.read(), color="red")
return
if status is not None:
tasks = list(self.iflat_tasks(status=status, nids=nids))
else:
errors = list(self.iflat_tasks(status=self.S_ERROR, nids=nids))
qcriticals = list(self.iflat_tasks(status=self.S_QCRITICAL, nids=nids))
abicriticals = list(self.iflat_tasks(status=self.S_ABICRITICAL, nids=nids))
tasks = errors + qcriticals + abicriticals
# For each task selected:
# 1) Check the error files of the task. If not empty, print the content to stdout and we are done.
# 2) If error files are empty, look at the master log file for possible errors
# 3) If this check also fails, scan all the process log files.
# TODO: This check is not needed if we introduce a new __abinit_error__ file
# that is created by the first MPI process that invokes MPI abort!
#
ntasks = 0
for task in tasks:
print(make_banner(str(task), width=ncols, mark="="))
ntasks += 1
# Start with error files.
for efname in ["qerr_file", "stderr_file",]:
err_file = getattr(task, efname)
if err_file.exists:
s = err_file.read()
if not s: continue
print(make_banner(str(err_file), width=ncols, mark="="))
cprint(s, color="red")
#count += 1
# Check main log file.
try:
report = task.get_event_report()
if report and report.num_errors:
print(make_banner(os.path.basename(report.filename), width=ncols, mark="="))
s = "\n".join(str(e) for e in report.errors)
else:
s = None
except Exception as exc:
s = str(exc)
count = 0 # count > 0 means we found some useful info that could explain the failures.
if s is not None:
cprint(s, color="red")
count += 1
if not count:
# Inspect all log files produced by the other nodes.
log_files = task.tmpdir.list_filepaths(wildcard="*LOG_*")
if not log_files:
cprint("No *LOG_* file in tmpdir. This usually happens if you are running with many CPUs", color="magenta")
for log_file in log_files:
try:
report = EventsParser().parse(log_file)
if report.errors:
print(report)
count += 1
break
except Exception as exc:
cprint(str(exc), color="red")
count += 1
break
if not count:
cprint("Houston, we could not find any error message that can explain the problem", color="magenta")
print("Number of tasks analyzed: %d" % ntasks)
def cancel(self, nids=None):
"""
Cancel all the tasks that are in the queue.
nids is an optional list of node identifiers used to filter the tasks.
Returns:
Number of jobs cancelled, negative value if error
"""
if self.has_chrooted:
# TODO: Use paramiko to kill the job?
warnings.warn("Cannot cancel the flow via sshfs!")
return -1
# If we are running with the scheduler, we must send a SIGKILL signal.
if os.path.exists(self.pid_file):
cprint("Found scheduler attached to this flow.", "yellow")
cprint("Sending SIGKILL to the scheduler before cancelling the tasks!", "yellow")
with open(self.pid_file, "r") as fh:
pid = int(fh.readline())
retcode = os.system("kill -9 %d" % pid)
self.history.info("Sent SIGKILL to the scheduler, retcode: %s" % retcode)
try:
os.remove(self.pid_file)
except IOError:
pass
num_cancelled = 0
for task in self.iflat_tasks(nids=nids):
num_cancelled += task.cancel()
return num_cancelled
def get_njobs_in_queue(self, username=None):
"""
Returns the number of jobs in the queue, or None when the number of jobs cannot be determined.
Args:
username: (str) the username of the jobs to count (default is to autodetect)
"""
return self.manager.qadapter.get_njobs_in_queue(username=username)
def rmtree(self, ignore_errors=False, onerror=None):
"""Remove workdir (same API as shutil.rmtree)."""
if not os.path.exists(self.workdir): return
shutil.rmtree(self.workdir, ignore_errors=ignore_errors, onerror=onerror)
def rm_and_build(self):
"""Remove the workdir and rebuild the flow."""
self.rmtree()
self.build()
def build(self, *args, **kwargs):
"""Make directories and files of the `Flow`."""
# Allocate here if not done yet!
if not self.allocated: self.allocate()
self.indir.makedirs()
self.outdir.makedirs()
self.tmpdir.makedirs()
# Check the nodeid file in workdir
nodeid_path = os.path.join(self.workdir, ".nodeid")
if os.path.exists(nodeid_path):
with open(nodeid_path, "rt") as fh:
node_id = int(fh.read())
if self.node_id != node_id:
msg = ("\nFound node_id %s in file:\n\n %s\n\nwhile the node_id of the present flow is %d.\n"
"This means that you are trying to build a new flow in a directory already used by another flow.\n"
"Possible solutions:\n"
" 1) Change the workdir of the new flow.\n"
" 2) remove the old directory either with `rm -rf` or by calling the method flow.rmtree()\n"
% (node_id, nodeid_path, self.node_id))
raise RuntimeError(msg)
else:
with open(nodeid_path, "wt") as fh:
fh.write(str(self.node_id))
for work in self:
work.build(*args, **kwargs)
def build_and_pickle_dump(self, abivalidate=False):
"""
Build dirs and file of the `Flow` and save the object in pickle format.
Returns 0 if success
Args:
abivalidate: If True, all the input files are validated by calling
the abinit parser. If the validation fails, ValueError is raised.
"""
self.build()
if not abivalidate: return self.pickle_dump()
# Validation with Abinit.
isok, errors = self.abivalidate_inputs()
if isok: return self.pickle_dump()
errlines = []
for i, e in enumerate(errors):
errlines.append("[%d] %s" % (i, e))
raise ValueError("\n".join(errlines))
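# Example (minimal sketch): the typical way to persist a freshly built flow,
# validating all inputs with the Abinit parser first:
#
#     flow.build_and_pickle_dump(abivalidate=True)  # raises ValueError on invalid inputs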
@check_spectator
def pickle_dump(self):
"""
Save the status of the object in pickle format.
Returns 0 if success
"""
if self.has_chrooted:
warnings.warn("Cannot pickle_dump since we have chrooted from %s" % self.has_chrooted)
return -1
#if self.in_spectator_mode:
# warnings.warn("Cannot pickle_dump since flow is in_spectator_mode")
# return -2
protocol = self.pickle_protocol
# Atomic transaction with FileLock.
with FileLock(self.pickle_file):
with AtomicFile(self.pickle_file, mode="wb") as fh:
pmg_pickle_dump(self, fh, protocol=protocol)
return 0
def pickle_dumps(self, protocol=None):
"""
Return a string with the pickle representation.
`protocol` selects the pickle protocol. self.pickle_protocol is
used if `protocol` is None
"""
strio = StringIO()
pmg_pickle_dump(self, strio,
protocol=self.pickle_protocol if protocol is None
else protocol)
return strio.getvalue()
def register_task(self, input, deps=None, manager=None, task_class=None):
"""
Utility function that generates a `Work` made of a single task
Args:
input: :class:`AbinitInput`
deps: List of :class:`Dependency` objects specifying the dependency of this node.
An empty list of deps implies that this node has no dependencies.
manager: The :class:`TaskManager` responsible for the submission of the task.
If manager is None, we use the :class:`TaskManager` specified during the creation of the work.
task_class: Task subclass to instantiate. Default: :class:`AbinitTask`
Returns:
The generated :class:`Work` for the task, work[0] is the actual task.
"""
work = Work(manager=manager)
task = work.register(input, deps=deps, task_class=task_class)
self.register_work(work)
return work
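# Example (sketch): wrap a single AbinitInput in its own one-task Work;
# work[0] is the actual task, as noted in the docstring above:
#
#     work = flow.register_task(scf_input, task_class=ScfTask)
#     scf_task = work[0]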
def register_work(self, work, deps=None, manager=None, workdir=None):
"""
Register a new :class:`Work` and add it to the internal list, taking into account possible dependencies.
Args:
work: :class:`Work` object.
deps: List of :class:`Dependency` objects specifying the dependency of this node.
An empty list of deps implies that this node has no dependencies.
manager: The :class:`TaskManager` responsible for the submission of the task.
If manager is None, we use the `TaskManager` specified during the creation of the work.
workdir: The name of the directory used for the :class:`Work`.
Returns:
The registered :class:`Work`.
"""
if getattr(self, "workdir", None) is not None:
# The flow has a directory; build the name of the work's directory.
work_workdir = None
if workdir is None:
work_workdir = os.path.join(self.workdir, "w" + str(len(self)))
else:
work_workdir = os.path.join(self.workdir, os.path.basename(workdir))
work.set_workdir(work_workdir)
if manager is not None:
work.set_manager(manager)
self.works.append(work)
if deps:
deps = [Dependency(node, exts) for node, exts in deps.items()]
work.add_deps(deps)
return work
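# Example (sketch): deps is a {node: extension(s)} mapping. Make a new work
# depend on the WFK file produced by a previously registered task:
#
#     flow.register_work(sigma_work, deps={nscf_task: "WFK"})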
def register_work_from_cbk(self, cbk_name, cbk_data, deps, work_class, manager=None):
"""
Registers a callback function that will generate the :class:`Task` of the :class:`Work`.
Args:
cbk_name: Name of the callback function (must be a bound method of self)
cbk_data: Additional data passed to the callback function.
deps: List of :class:`Dependency` objects specifying the dependency of the work.
work_class: :class:`Work` class to instantiate.
manager: The :class:`TaskManager` responsible for the submission of the task.
If manager is None, we use the `TaskManager` specified during the creation of the :class:`Flow`.
Returns:
The :class:`Work` that will be finalized by the callback.
"""
# TODO: pass a Work factory instead of a class
# Directory of the Work.
work_workdir = os.path.join(self.workdir, "w" + str(len(self)))
# Create an empty work and register the callback
work = work_class(workdir=work_workdir, manager=manager)
self._works.append(work)
deps = [Dependency(node, exts) for node, exts in deps.items()]
if not deps:
raise ValueError("A callback must have deps!")
work.add_deps(deps)
# Wrap the callable in a Callback object and save
# useful info such as the index of the work and the callback data.
cbk = FlowCallback(cbk_name, self, deps=deps, cbk_data=cbk_data)
self._callbacks.append(cbk)
return work
@property
def allocated(self):
"""Numer of allocations. Set by `allocate`."""
try:
return self._allocated
except AttributeError:
return 0
def allocate(self, workdir=None):
"""
Allocate the `Flow` i.e. assign the `workdir` and (optionally)
the :class:`TaskManager` to the different tasks in the Flow.
Args:
workdir: Working directory of the flow. Must be specified here
if we haven't initialized the workdir in the __init__.
"""
if workdir is not None:
# We set the workdir of the flow here
self.set_workdir(workdir)
for i, work in enumerate(self):
work.set_workdir(os.path.join(self.workdir, "w" + str(i)))
if not hasattr(self, "workdir"):
raise RuntimeError("You must call flow.allocate(workdir) if the workdir is not passed to __init__")
for work in self:
# Each work has a reference to its flow.
work.allocate(manager=self.manager)
work.set_flow(self)
# Each task has a reference to its work.
for task in work:
task.set_work(work)
self.check_dependencies()
if not hasattr(self, "_allocated"): self._allocated = 0
self._allocated += 1
return self
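# Example (sketch): a flow created without a workdir must be allocated
# explicitly before it can be built:
#
#     flow.allocate(workdir="flow_gs")
#     flow.build_and_pickle_dump()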
def use_smartio(self):
"""
This function should be called when the entire `Flow` has been built.
It tries to reduce the pressure on the hard disk by using Abinit smart-io
capabilities for those files that are not needed by other nodes.
Smart-io means that big files (e.g. WFK) are written only if the calculation
is unconverged so that we can restart from it. No output is produced if
convergence is achieved.
"""
if not self.allocated:
self.allocate()
#raise RuntimeError("You must call flow.allocate before invoking flow.use_smartio")
return
for task in self.iflat_tasks():
children = task.get_children()
if not children:
# Change the input so that output files are produced
# only if the calculation is not converged.
task.history.info("Will disable IO for task")
task.set_vars(prtwf=-1, prtden=0) # TODO: prt1wf=-1,
else:
must_produce_abiexts = []
for child in children:
# Get the list of dependencies. Find that task
for d in child.deps:
must_produce_abiexts.extend(d.exts)
must_produce_abiexts = set(must_produce_abiexts)
#print("must_produce_abiexts", must_produce_abiexts)
# Variables supporting smart-io.
smart_prtvars = {
"prtwf": "WFK",
}
# Set the variable to -1 to disable the output
for varname, abiext in smart_prtvars.items():
if abiext not in must_produce_abiexts:
print("%s: setting %s to -1" % (task, varname))
task.set_vars({varname: -1})
#def new_from_input_decorators(self, new_workdir, decorators)
# """
# Return a new :class:`Flow` in which all the Abinit inputs have been
# decorated by decorators.
# """
# # The tricky part here is how to assign a new id to the new nodes while maintaining the
# # correct dependencies! The safest approach would be to pass through __init__
# # instead of using copy.deepcopy()
# return flow
def show_dependencies(self, stream=sys.stdout):
"""Writes to the given stream the ASCII representation of the dependency tree."""
def child_iter(node):
return [d.node for d in node.deps]
def text_str(node):
return colored(str(node), color=node.status.color_opts["color"])
for task in self.iflat_tasks():
print(draw_tree(task, child_iter, text_str), file=stream)
def on_dep_ok(self, signal, sender):
# TODO
# Replace this callback with dynamic dispatch
# on_all_S_OK for work
# on_S_OK for task
logger.info("on_dep_ok with sender %s, signal %s" % (str(sender), signal))
for i, cbk in enumerate(self._callbacks):
if not cbk.handle_sender(sender):
logger.info("%s does not handle sender %s" % (cbk, sender))
continue
if not cbk.can_execute():
logger.info("Cannot execute %s" % cbk)
continue
# Execute the callback and disable it
self.history.info("flow in on_dep_ok: about to execute callback %s" % str(cbk))
cbk()
cbk.disable()
# Update the database.
self.pickle_dump()
@check_spectator
def finalize(self):
"""
This method is called when the flow is completed.
Return 0 if success
"""
if self.finalized:
self.history.warning("Calling finalize on an already finalized flow.")
return 1
self.history.info("Calling flow.finalize.")
self.finalized = True
if self.has_db:
self.history.info("Saving results in database.")
try:
self.db_insert()
self.finalized = True
except Exception:
logger.critical("MongoDb insertion failed.")
return 2
# Here we remove the big output files if we have the garbage collector
# and the policy is set to "flow."
if self.gc is not None and self.gc.policy == "flow":
self.history.info("gc.policy set to flow. Will clean task output files.")
for task in self.iflat_tasks():
task.clean_output_files()
return 0
def set_garbage_collector(self, exts=None, policy="task"):
"""
Enable the garbage collector that will remove the big output files that are not needed.
Args:
exts: string or list with the Abinit file extensions to be removed. A default is
provided if exts is None
policy: Either `flow` or `task`. If policy is set to 'task', we remove the output
files as soon as the task reaches S_OK. If 'flow', the files are removed
only when the flow is finalized. This option should be used when we are dealing
with a dynamic flow with callbacks generating other tasks since a :class:`Task`
might not be aware of its children when it reached S_OK.
"""
assert policy in ("task", "flow")
exts = list_strings(exts) if exts is not None else ("WFK", "SUS", "SCR", "BSR", "BSC")
gc = GarbageCollector(exts=set(exts), policy=policy)
self.set_gc(gc)
for work in self:
#work.set_gc(gc) # TODO Add support for Works and flow policy
for task in work:
task.set_gc(gc)
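# Example (sketch): remove WFK and SCR files as soon as each task reaches
# S_OK:
#
#     flow.set_garbage_collector(exts=["WFK", "SCR"], policy="task")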
def connect_signals(self):
"""
Connect the signals within the `Flow`.
The `Flow` is responsible for catching the important signals raised from its works.
"""
# Connect the signals inside each Work.
for work in self:
work.connect_signals()
# Observe the nodes that must reach S_OK in order to call the callbacks.
for cbk in self._callbacks:
#cbk.enable()
for dep in cbk.deps:
logger.info("connecting %s \nwith sender %s, signal %s" % (str(cbk), dep.node, dep.node.S_OK))
dispatcher.connect(self.on_dep_ok, signal=dep.node.S_OK, sender=dep.node, weak=False)
# Associate to each signal the callback _on_signal
# (bound method of the node that will be called by `Flow`
# Each node will set its attribute _done_signal to True to tell
# the flow that this callback should be disabled.
# Register the callbacks for the Work.
#for work in self:
# slot = self._sig_slots[work]
# for signal in S_ALL:
# done_signal = getattr(work, "_done_ " + signal, False)
# if not done_sig:
# cbk_name = "_on_" + str(signal)
# cbk = getattr(work, cbk_name, None)
# if cbk is None: continue
# slot[work][signal].append(cbk)
# print("connecting %s\nwith sender %s, signal %s" % (str(cbk), dep.node, dep.node.S_OK))
# dispatcher.connect(self.on_dep_ok, signal=signal, sender=dep.node, weak=False)
# Register the callbacks for the Tasks.
#self.show_receivers()
def disconnect_signals(self):
"""Disable the signals within the `Flow`."""
# Disconnect the signals inside each Work.
for work in self:
work.disconnect_signals()
# Disable callbacks.
for cbk in self._callbacks:
cbk.disable()
def show_receivers(self, sender=None, signal=None):
sender = sender if sender is not None else dispatcher.Any
signal = signal if signal is not None else dispatcher.Any
print("*** live receivers ***")
for rec in dispatcher.liveReceivers(dispatcher.getReceivers(sender, signal)):
print("receiver -->", rec)
print("*** end live receivers ***")
def set_spectator_mode(self, mode=True):
"""
When the flow is in spectator_mode, we have to disable signals, pickle dump and possible callbacks.
A spectator can still operate on the flow but the new status of the flow won't be saved in
the pickle file. Usually the flow is in spectator mode when we are already running it via
the scheduler or other means and we should not interfere with its evolution.
This is the reason why signals and callbacks must be disabled.
Unfortunately, preventing client code from calling methods with side effects when
the flow is in spectator mode is not easy (e.g. flow.cancel will cancel the tasks submitted to the
queue and the flow used by the scheduler won't see this change!).
"""
# Set the flags of all the nodes in the flow.
mode = bool(mode)
self.in_spectator_mode = mode
for node in self.iflat_nodes():
node.in_spectator_mode = mode
# connect/disconnect signals depending on mode.
if not mode:
self.connect_signals()
else:
self.disconnect_signals()
#def get_results(self, **kwargs)
def rapidfire(self, check_status=True, **kwargs):
"""
Use :class:`PyLauncher` to submit tasks in rapidfire mode.
kwargs contains the options passed to the launcher.
Return:
number of tasks submitted.
"""
self.check_pid_file()
self.set_spectator_mode(False)
if check_status: self.check_status()
from .launcher import PyLauncher
return PyLauncher(self, **kwargs).rapidfire()
def single_shot(self, check_status=True, **kwargs):
"""
Use :class:`PyLauncher` to submit one task.
kwargs contains the options passed to the launcher.
Return:
number of tasks submitted.
"""
self.check_pid_file()
self.set_spectator_mode(False)
if check_status: self.check_status()
from .launcher import PyLauncher
return PyLauncher(self, **kwargs).single_shot()
def make_scheduler(self, **kwargs):
"""
Build and return a :class:`PyFlowScheduler` to run the flow.
Args:
kwargs: if empty we use the user configuration file.
if `filepath` in kwargs we init the scheduler from filepath.
else pass **kwargs to :class:`PyFlowScheduler` __init__ method.
"""
from .launcher import PyFlowScheduler
if not kwargs:
# User config if kwargs is empty
sched = PyFlowScheduler.from_user_config()
else:
# Use from_file if filepath is present, else call __init__
filepath = kwargs.pop("filepath", None)
if filepath is not None:
assert not kwargs
sched = PyFlowScheduler.from_file(filepath)
else:
sched = PyFlowScheduler(**kwargs)
sched.add_flow(self)
return sched
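# Example (sketch): build the scheduler from the user configuration file and
# let it drive the flow to completion:
#
#     sched = flow.make_scheduler()
#     sched.start()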
def batch(self, timelimit=None):
"""
Run the flow in batch mode, return exit status of the job script.
Requires a manager.yml file and a batch_adapter adapter.
Args:
timelimit: Time limit (int with seconds or string with time given with the slurm convention:
"days-hours:minutes:seconds"). If timelimit is None, the default value specified in the
`batch_adapter` entry of `manager.yml` is used.
"""
from .launcher import BatchLauncher
# Create a batch dir from the flow.workdir.
prev_dir = os.path.join(*self.workdir.split(os.path.sep)[:-1])
prev_dir = os.path.join(os.path.sep, prev_dir)
workdir = os.path.join(prev_dir, os.path.basename(self.workdir) + "_batch")
return BatchLauncher(workdir=workdir, flows=self).submit(timelimit=timelimit)
def make_light_tarfile(self, name=None):
"""Lightweight tarball file. Mainly used for debugging. Return the name of the tarball file."""
name = os.path.basename(self.workdir) + "-light.tar.gz" if name is None else name
return self.make_tarfile(name=name, exclude_dirs=["outdata", "indata", "tmpdata"])
def make_tarfile(self, name=None, max_filesize=None, exclude_exts=None, exclude_dirs=None, verbose=0, **kwargs):
"""
Create a tarball file.
Args:
name: Name of the tarball file. Set to os.path.basename(flow.workdir) + ".tar.gz" if name is None.
max_filesize (int or string with unit): a file is included in the tar file if its size <= max_filesize
Can be specified in bytes e.g. `max_filesize=1024` or with a string with unit e.g. `max_filesize="1 Mb"`.
No check is done if max_filesize is None.
exclude_exts: List of file extensions to be excluded from the tar file.
exclude_dirs: List of directory basenames to be excluded.
verbose (int): Verbosity level.
kwargs: keyword arguments passed to the :class:`TarFile` constructor.
Returns:
The name of the tarfile.
"""
def any2bytes(s):
"""Convert string or number to memory in bytes."""
if is_string(s):
return int(Memory.from_string(s).to("b"))
else:
return int(s)
if max_filesize is not None:
max_filesize = any2bytes(max_filesize)
if exclude_exts:
# Add/remove ".nc" so that we can simply pass "GSR" instead of "GSR.nc"
# Moreover this trick allows one to treat WFK.nc and WFK file on the same footing.
exts = []
for e in list_strings(exclude_exts):
exts.append(e)
if e.endswith(".nc"):
exts.append(e.replace(".nc", ""))
else:
exts.append(e + ".nc")
exclude_exts = exts
def filter(tarinfo):
"""
Function that takes a TarInfo object argument and returns the changed TarInfo object.
If it instead returns None the TarInfo object will be excluded from the archive.
"""
# Skip links.
if tarinfo.issym() or tarinfo.islnk():
if verbose: print("Excluding link: %s" % tarinfo.name)
return None
# Check size in bytes
if max_filesize is not None and tarinfo.size > max_filesize:
if verbose: print("Excluding %s due to max_filesize" % tarinfo.name)
return None
# Filter filenames.
if exclude_exts and any(tarinfo.name.endswith(ext) for ext in exclude_exts):
if verbose: print("Excluding %s due to extension" % tarinfo.name)
return None
# Exclude directories (use dir basenames).
if exclude_dirs and any(dir_name in exclude_dirs for dir_name in tarinfo.name.split(os.path.sep)):
if verbose: print("Excluding %s due to exclude_dirs" % tarinfo.name)
return None
return tarinfo
back = os.getcwd()
os.chdir(os.path.join(self.workdir, ".."))
import tarfile
name = os.path.basename(self.workdir) + ".tar.gz" if name is None else name
with tarfile.open(name=name, mode='w:gz', **kwargs) as tar:
tar.add(os.path.basename(self.workdir), arcname=None, recursive=True, exclude=None, filter=filter)
# Add the script used to generate the flow.
if self.pyfile is not None and os.path.exists(self.pyfile):
tar.add(self.pyfile)
os.chdir(back)
return name
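# Example (sketch): archive the flow while skipping wavefunction files and
# anything larger than 10 Mb:
#
#     tarname = flow.make_tarfile(exclude_exts=["WFK"], max_filesize="10 Mb")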
#def abirobot(self, ext, check_status=True, nids=None):
# """
# Builds and return the :class:`Robot` subclass from the file extension `ext`.
# `nids` is an optional list of node identifiers used to filter the tasks in the flow.
# """
# from abipy.abilab import abirobot
# if check_status: self.check_status()
# return abirobot(flow=self, ext=ext, nids=nids):
@add_fig_kwargs
def plot_networkx(self, mode="network", with_edge_labels=False, ax=None,
node_size="num_cores", node_label="name_class", layout_type="spring", **kwargs):
"""
Use networkx to draw the flow with the connections among the nodes and
the status of the tasks.
Args:
mode: `network` to show connections, `status` to group tasks by status.
with_edge_labels: True to draw edge labels.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
node_size: By default, the size of the node is proportional to the number of cores used.
node_label: By default, the task class is used to label the node.
layout_type: Get positions for all nodes using `layout_type`. e.g. pos = nx.spring_layout(g)
.. warning::
Requires networkx package.
"""
if not self.allocated: self.allocate()
import networkx as nx
# Build the graph
g, edge_labels = nx.Graph(), {}
tasks = list(self.iflat_tasks())
for task in tasks:
g.add_node(task, name=task.name)
for child in task.get_children():
g.add_edge(task, child)
# TODO: Add getters! What about locked nodes!
i = [dep.node for dep in child.deps].index(task)
edge_labels[(task, child)] = " ".join(child.deps[i].exts)
# Get positions for all nodes using layout_type.
# e.g. pos = nx.spring_layout(g)
pos = getattr(nx, layout_type + "_layout")(g)
# Select function used to compute the size of the node
make_node_size = dict(num_cores=lambda task: 300 * task.manager.num_cores)[node_size]
# Select function used to build the label
make_node_label = dict(name_class=lambda task: task.pos_str + "\n" + task.__class__.__name__,)[node_label]
labels = {task: make_node_label(task) for task in g.nodes()}
ax, fig, plt = get_ax_fig_plt(ax=ax)
# Select plot type.
if mode == "network":
nx.draw_networkx(g, pos, labels=labels,
node_color=[task.color_rgb for task in g.nodes()],
node_size=[make_node_size(task) for task in g.nodes()],
width=1, style="dotted", with_labels=True, ax=ax)
# Draw edge labels
if with_edge_labels:
nx.draw_networkx_edge_labels(g, pos, edge_labels=edge_labels, ax=ax)
elif mode == "status":
# Group tasks by status.
for status in self.ALL_STATUS:
tasks = list(self.iflat_tasks(status=status))
# Draw nodes (color is given by status)
node_color = status.color_opts["color"]
if node_color is None: node_color = "black"
#print("num nodes %s with node_color %s" % (len(tasks), node_color))
nx.draw_networkx_nodes(g, pos,
nodelist=tasks,
node_color=node_color,
node_size=[make_node_size(task) for task in tasks],
alpha=0.5, ax=ax
#label=str(status),
)
# Draw edges.
nx.draw_networkx_edges(g, pos, width=2.0, alpha=0.5, arrows=True, ax=ax) # edge_color='r')
# Draw labels
nx.draw_networkx_labels(g, pos, labels, font_size=12, ax=ax)
# Draw edge labels
if with_edge_labels:
nx.draw_networkx_edge_labels(g, pos, edge_labels=edge_labels, ax=ax)
#label_pos=0.5, font_size=10, font_color='k', font_family='sans-serif', font_weight='normal',
# alpha=1.0, bbox=None, ax=None, rotate=True, **kwds)
else:
raise ValueError("Unknown value for mode: %s" % str(mode))
ax.axis("off")
return fig
class G0W0WithQptdmFlow(Flow):
def __init__(self, workdir, scf_input, nscf_input, scr_input, sigma_inputs, manager=None):
"""
Build a :class:`Flow` for one-shot G0W0 calculations.
The computation of the q-points for the screening is parallelized with qptdm
i.e. we run independent calculations for each q-point and then we merge the final results.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
nscf_input: Input for the NSCF run (band structure run).
scr_input: Input for the SCR run.
sigma_inputs: Input(s) for the SIGMA run(s).
manager: :class:`TaskManager` object used to submit the jobs
Initialized from manager.yml if manager is None.
"""
super(G0W0WithQptdmFlow, self).__init__(workdir, manager=manager)
# Register the first work (GS + NSCF calculation)
bands_work = self.register_work(BandStructureWork(scf_input, nscf_input))
# Register the callback that will generate the work for the SCR run with qptdm.
scr_work = self.register_work_from_cbk(cbk_name="cbk_qptdm_workflow", cbk_data={"input": scr_input},
deps={bands_work.nscf_task: "WFK"}, work_class=QptdmWork)
# The last work contains a list of SIGMA tasks
# that will use the data produced in the previous two works.
if not isinstance(sigma_inputs, (list, tuple)):
sigma_inputs = [sigma_inputs]
sigma_work = Work()
for sigma_input in sigma_inputs:
sigma_work.register_sigma_task(sigma_input, deps={bands_work.nscf_task: "WFK", scr_work: "SCR"})
self.register_work(sigma_work)
self.allocate()
def cbk_qptdm_workflow(self, cbk):
"""
This callback is executed by the flow when bands_work.nscf_task reaches S_OK.
It computes the list of q-points for the W(q,G,G'), creates nqpt tasks
in the second work (QptdmWork), and connects the signals.
"""
scr_input = cbk.data["input"]
# Use the WFK file produced by the second
# Task in the first Work (NSCF step).
nscf_task = self[0][1]
wfk_file = nscf_task.outdir.has_abiext("WFK")
work = self[1]
work.set_manager(self.manager)
work.create_tasks(wfk_file, scr_input)
work.add_deps(cbk.deps)
work.set_flow(self)
# Each task has a reference to its work.
for task in work:
task.set_work(work)
# Add the garbage collector.
if self.gc is not None: task.set_gc(self.gc)
work.connect_signals()
work.build()
return work
class FlowCallbackError(Exception):
"""Exceptions raised by FlowCallback."""
class FlowCallback(object):
"""
This object implements the callbacks executed by the :class:`Flow` when
particular conditions are fulfilled. See on_dep_ok method of :class:`Flow`.
.. note::
I decided to implement callbacks via this object instead of a standard
approach based on bound methods because:
1) pickle (v<=3) does not support the pickling/unpickling of bound methods
2) There's some extra logic and extra data needed for the proper functioning
of a callback at the flow level and this object provides an easy-to-use interface.
"""
Error = FlowCallbackError
def __init__(self, func_name, flow, deps, cbk_data):
"""
Args:
func_name: String with the name of the callback to execute.
func_name must be a bound method of flow with signature:
func_name(self, cbk)
where self is the Flow instance and cbk is the callback
flow: Reference to the :class:`Flow`
deps: List of dependencies associated to the callback
The callback is executed when all dependencies reach S_OK.
cbk_data: Dictionary with additional data that will be passed to the callback via self.
"""
self.func_name = func_name
self.flow = flow
self.deps = deps
self.data = cbk_data or {}
self._disabled = False
def __str__(self):
return "%s: %s bound to %s" % (self.__class__.__name__, self.func_name, self.flow)
def __call__(self):
"""Execute the callback."""
if self.can_execute():
# Get the bound method of the flow from func_name.
# We use this trick because pickle (format <=3) does not support bound methods.
try:
func = getattr(self.flow, self.func_name)
except AttributeError as exc:
raise self.Error(str(exc))
return func(self)
else:
raise self.Error("You tried to __call_ a callback that cannot be executed!")
def can_execute(self):
"""True if we can execute the callback."""
return not self._disabled and all(dep.status == dep.node.S_OK for dep in self.deps)
def disable(self):
"""
Disable the callback. This usually happens after the callback has been executed.
"""
self._disabled = True
def enable(self):
"""Enable the callback"""
self._disabled = False
def handle_sender(self, sender):
"""
True if the callback is associated to the sender
i.e. if the node who sent the signal appears in the
dependencies of the callback.
"""
return sender in [d.node for d in self.deps]
# Factory functions.
def bandstructure_flow(workdir, scf_input, nscf_input, dos_inputs=None, manager=None, flow_class=Flow, allocate=True):
"""
Build a :class:`Flow` for band structure calculations.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
nscf_input: Input for the NSCF run (band structure run).
dos_inputs: Input(s) for the NSCF run (dos run).
manager: :class:`TaskManager` object used to submit the jobs
Initialized from manager.yml if manager is None.
flow_class: Flow subclass
allocate: True if the flow should be allocated before returning.
Returns:
:class:`Flow` object
"""
flow = flow_class(workdir, manager=manager)
work = BandStructureWork(scf_input, nscf_input, dos_inputs=dos_inputs)
flow.register_work(work)
# Handy aliases
flow.scf_task, flow.nscf_task, flow.dos_tasks = work.scf_task, work.nscf_task, work.dos_tasks
if allocate: flow.allocate()
return flow
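# Example (sketch, assuming scf_input and nscf_input are AbinitInput objects):
#
#     flow = bandstructure_flow("flow_bands", scf_input, nscf_input)
#     flow.build_and_pickle_dump()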
def g0w0_flow(workdir, scf_input, nscf_input, scr_input, sigma_inputs, manager=None, flow_class=Flow, allocate=True):
"""
Build a :class:`Flow` for one-shot $G_0W_0$ calculations.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
nscf_input: Input for the NSCF run (band structure run).
scr_input: Input for the SCR run.
sigma_inputs: List of inputs for the SIGMA run.
flow_class: Flow class
manager: :class:`TaskManager` object used to submit the jobs.
Initialized from manager.yml if manager is None.
allocate: True if the flow should be allocated before returning.
Returns:
:class:`Flow` object
"""
flow = flow_class(workdir, manager=manager)
work = G0W0Work(scf_input, nscf_input, scr_input, sigma_inputs)
flow.register_work(work)
if allocate: flow.allocate()
return flow
class PhononFlow(Flow):
"""
1) One workflow for the GS run.
2) nqpt works for phonon calculations. Each work contains
nirred tasks where nirred is the number of irreducible phonon perturbations
for that particular q-point.
"""
@classmethod
def from_scf_input(cls, workdir, scf_input, ph_ngqpt, with_becs=True, manager=None, allocate=True):
"""
Create a `PhononFlow` for phonon calculations from an `AbinitInput` defining a ground-state run.
Args:
workdir: Working directory of the flow.
scf_input: :class:`AbinitInput` object with the parameters for the GS-SCF run.
ph_ngqpt: q-mesh for phonons. Must be a sub-mesh of the k-mesh used for
electrons, e.g. if ngkpt = (8, 8, 8), ph_ngqpt = (4, 4, 4) is a valid choice
whereas ph_ngqpt = (3, 3, 3) is not!
with_becs: True if Born effective charges are wanted.
manager: :class:`TaskManager` object. Read from `manager.yml` if None.
allocate: True if the flow should be allocated before returning.
Return:
:class:`PhononFlow` object.
"""
flow = cls(workdir, manager=manager)
# Register the SCF task
flow.register_scf_task(scf_input)
scf_task = flow[0][0]
# Make sure k-mesh and q-mesh are compatible.
scf_ngkpt, ph_ngqpt = np.array(scf_input["ngkpt"]), np.array(ph_ngqpt)
if any(scf_ngkpt % ph_ngqpt != 0):
raise ValueError("ph_ngqpt %s should be a sub-mesh of scf_ngkpt %s" % (ph_ngqpt, scf_ngkpt))
# Get the q-points in the IBZ from Abinit
qpoints = scf_input.abiget_ibz(ngkpt=ph_ngqpt, shiftk=(0,0,0), kptopt=1).points
# Create a PhononWork for each q-point. Add DDK and E-field if q == Gamma and with_becs.
for qpt in qpoints:
if np.allclose(qpt, 0) and with_becs:
ph_work = BecWork.from_scf_task(scf_task)
else:
ph_work = PhononWork.from_scf_task(scf_task, qpoints=qpt)
flow.register_work(ph_work)
if allocate: flow.allocate()
return flow
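# Example (sketch): phonons on a 4x4x4 q-mesh, a valid sub-mesh of a
# ngkpt = (8, 8, 8) k-mesh:
#
#     flow = PhononFlow.from_scf_input("flow_ph", scf_input,
#                                      ph_ngqpt=(4, 4, 4), with_becs=True)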
def open_final_ddb(self):
"""
Open the DDB file located in the output directory of the flow.
Return:
:class:`DdbFile` object, None if file could not be found or file is not readable.
"""
ddb_path = self.outdir.has_abiext("DDB")
if not ddb_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a GSR file in %s" % (self, self.outdir))
return None
from abipy.dfpt.ddb import DdbFile
try:
return DdbFile(ddb_path)
except Exception as exc:
logger.critical("Exception while reading DDB file at %s:\n%s" % (ddb_path, str(exc)))
return None
def finalize(self):
"""This method is called when the flow is completed."""
# Merge all the out_DDB files found in work.outdir.
ddb_files = list(filter(None, [work.outdir.has_abiext("DDB") for work in self]))
# Final DDB file will be produced in the outdir of the work.
out_ddb = self.outdir.path_in("out_DDB")
desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime())
mrgddb = wrappers.Mrgddb(manager=self.manager, verbose=0)
mrgddb.merge(self.outdir.path, ddb_files, out_ddb=out_ddb, description=desc)
print("Final DDB file available at %s" % out_ddb)
# Call the method of the super class.
retcode = super(PhononFlow, self).finalize()
#print("retcode", retcode)
#if retcode != 0: return retcode
return retcode
class NonLinearCoeffFlow(Flow):
"""
1) One workflow for the GS run.
2) nqpt works for electric field calculations. Each work contains
nirred tasks where nirred is the number of irreducible perturbations
for that particular q-point.
"""
@classmethod
def from_scf_input(cls, workdir, scf_input, manager=None, allocate=True):
"""
Create a `NonLinearCoeffFlow` for second-order susceptibility calculations from an `AbinitInput` defining a ground-state run.
Args:
workdir: Working directory of the flow.
scf_input: :class:`AbinitInput` object with the parameters for the GS-SCF run.
manager: :class:`TaskManager` object. Read from `manager.yml` if None.
allocate: True if the flow should be allocated before returning.
Return:
:class:`NonLinearCoeffFlow` object.
"""
flow = cls(workdir, manager=manager)
flow.register_scf_task(scf_input)
scf_task = flow[0][0]
nl_work = DteWork.from_scf_task(scf_task)
flow.register_work(nl_work)
if allocate: flow.allocate()
return flow
def open_final_ddb(self):
"""
Open the DDB file located in the output directory of the flow.
Return:
:class:`DdbFile` object, None if file could not be found or file is not readable.
"""
ddb_path = self.outdir.has_abiext("DDB")
if not ddb_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a GSR file in %s" % (self, self.outdir))
return None
from abipy.dfpt.ddb import DdbFile
try:
return DdbFile(ddb_path)
except Exception as exc:
logger.critical("Exception while reading DDB file at %s:\n%s" % (ddb_path, str(exc)))
return None
def finalize(self):
"""This method is called when the flow is completed."""
# Merge all the out_DDB files found in work.outdir.
ddb_files = list(filter(None, [work.outdir.has_abiext("DDB") for work in self]))
# Final DDB file will be produced in the outdir of the work.
out_ddb = self.outdir.path_in("out_DDB")
desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime())
mrgddb = wrappers.Mrgddb(manager=self.manager, verbose=0)
mrgddb.merge(self.outdir.path, ddb_files, out_ddb=out_ddb, description=desc)
print("Final DDB file available at %s" % out_ddb)
# Call the method of the super class.
retcode = super(NonLinearCoeffFlow, self).finalize()
print("retcode", retcode)
#if retcode != 0: return retcode
return retcode
# Alias for compatibility reasons. For the time being, DO NOT REMOVE
nonlinear_coeff_flow = NonLinearCoeffFlow
def phonon_flow(workdir, scf_input, ph_inputs, with_nscf=False, with_ddk=False, with_dde=False,
manager=None, flow_class=PhononFlow, allocate=True):
"""
Build a :class:`PhononFlow` for phonon calculations.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
ph_inputs: List of Inputs for the phonon runs.
with_nscf: add an nscf task in front of all phonon tasks to make sure the q-point is covered
with_ddk: add the ddk step
with_dde: add the dde step; if with_dde is set, with_ddk is switched on automatically
manager: :class:`TaskManager` used to submit the jobs
Initialized from manager.yml if manager is None.
flow_class: Flow class
Returns:
:class:`Flow` object
"""
logger.critical("phonon_flow is deprecated and could give wrong results")
if with_dde:
with_ddk = True
natom = len(scf_input.structure)
# Create the container that will manage the different works.
flow = flow_class(workdir, manager=manager)
# Register the first work (GS calculation)
# register_task creates a work for the task, registers it to the flow and returns the work
# the 0th element of the work is the task
scf_task = flow.register_task(scf_input, task_class=ScfTask)[0]
# Build a temporary work with a shell manager just to run
# ABINIT to get the list of irreducible perturbations for this q-point.
shell_manager = flow.manager.to_shell_manager(mpi_procs=1)
if with_ddk:
logger.info('add ddk')
# TODO
# MG Warning: be careful here because one should use tolde or tolwfr (tolvrs shall not be used!)
ddk_input = ph_inputs[0].deepcopy()
ddk_input.set_vars(qpt=[0, 0, 0], rfddk=1, rfelfd=2, rfdir=[1, 1, 1])
ddk_task = flow.register_task(ddk_input, deps={scf_task: 'WFK'}, task_class=DdkTask)[0]
if with_dde:
logger.info('add dde')
dde_input = ph_inputs[0].deepcopy()
dde_input.set_vars(qpt=[0, 0, 0], rfddk=1, rfelfd=2)
dde_input_idir = dde_input.deepcopy()
dde_input_idir.set_vars(rfdir=[1, 1, 1])
dde_task = flow.register_task(dde_input, deps={scf_task: 'WFK', ddk_task: 'DDK'}, task_class=DdeTask)[0]
if not isinstance(ph_inputs, (list, tuple)):
ph_inputs = [ph_inputs]
for i, ph_input in enumerate(ph_inputs):
fake_input = ph_input.deepcopy()
# Run abinit on the front-end to get the list of irreducible perturbations.
tmp_dir = os.path.join(workdir, "__ph_run" + str(i) + "__")
w = PhononWork(workdir=tmp_dir, manager=shell_manager)
fake_task = w.register(fake_input)
# Use the magic value paral_rf = -1 to get the list of irreducible perturbations for this q-point.
abivars = dict(
paral_rf=-1,
rfatpol=[1, natom], # Set of atoms to displace.
rfdir=[1, 1, 1], # Along this set of reduced coordinate axis.
)
fake_task.set_vars(abivars)
w.allocate()
w.start(wait=True)
# Parse the file to get the perturbations.
try:
irred_perts = yaml_read_irred_perts(fake_task.log_file.path)
except:
print("Error in %s" % fake_task.log_file.path)
raise
logger.info(irred_perts)
w.rmtree()
# Now we can build the final list of works:
# One work per q-point, each work computes all
# the irreducible perturbations for a single q-point.
work_qpt = PhononWork()
if with_nscf:
# MG: Warning, this code assumes that entry 0 is Gamma!
nscf_input = copy.deepcopy(scf_input)
nscf_input.set_vars(kptopt=3, iscf=-3, qpt=irred_perts[0]['qpt'], nqpt=1)
nscf_task = work_qpt.register_nscf_task(nscf_input, deps={scf_task: "DEN"})
deps = {nscf_task: "WFQ", scf_task: "WFK"}
else:
deps = {scf_task: "WFK"}
if with_ddk:
deps[ddk_task] = 'DDK'
logger.info(irred_perts[0]['qpt'])
for irred_pert in irred_perts:
#print(irred_pert)
new_input = ph_input.deepcopy()
#rfatpol 1 1 # Only the first atom is displaced
#rfdir 1 0 0 # Along the first reduced coordinate axis
qpt = irred_pert["qpt"]
idir = irred_pert["idir"]
ipert = irred_pert["ipert"]
# TODO this will work for phonons, but not for the other types of perturbations.
rfdir = 3 * [0]
rfdir[idir - 1] = 1
rfatpol = [ipert, ipert]
new_input.set_vars(
#rfpert=1,
qpt=qpt,
rfdir=rfdir,
rfatpol=rfatpol,
)
if with_ddk:
new_input.set_vars(rfelfd=3)
work_qpt.register_phonon_task(new_input, deps=deps)
flow.register_work(work_qpt)
if allocate: flow.allocate()
return flow
def phonon_conv_flow(workdir, scf_input, qpoints, params, manager=None, allocate=True):
"""
Create a :class:`Flow` to perform convergence studies for phonon calculations.
Args:
workdir: Working directory of the flow.
scf_input: :class:`AbinitInput` object defining a GS-SCF calculation.
qpoints: List of list of lists with the reduced coordinates of the q-point(s).
params:
To perform a convergence study wrt ecut: params=["ecut", [2, 4, 6]]
manager: :class:`TaskManager` object responsible for the submission of the jobs.
If manager is None, the object is initialized from the yaml file
located either in the working directory or in the user configuration dir.
allocate: True if the flow should be allocated before returning.
Return:
:class:`Flow` object.
"""
qpoints = np.reshape(qpoints, (-1, 3))
flow = Flow(workdir=workdir, manager=manager)
for qpt in qpoints:
for gs_inp in scf_input.product(*params):
# Register the SCF task
work = flow.register_scf_task(gs_inp)
# Add the PhononWork connected to this scf_task.
flow.register_work(PhononWork.from_scf_task(work[0], qpoints=qpt))
if allocate: flow.allocate()
return flow
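# Example (sketch): convergence of the Gamma-point phonons wrt ecut:
#
#     flow = phonon_conv_flow("flow_ph_conv", scf_input, qpoints=(0, 0, 0),
#                             params=["ecut", [4, 8, 12]])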
|
tallakahath/pymatgen
|
pymatgen/io/abinit/flows.py
|
Python
|
mit
| 106,450
|
[
"ABINIT",
"pymatgen"
] |
76628aa965dbdf4f553dca1034b8ab2632f732bdcdab3bcef73cee6bd992ed04
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# jobscriptgenerator - dynamically generate job script right before job handout
# Copyright (C) 2003-2014 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Job script generator"""
import os
import time
import socket
from binascii import hexlify
from copy import deepcopy
import genjobscriptpython
import genjobscriptsh
import genjobscriptjava
from shared.base import client_id_dir
from shared.fileio import write_file, pickle, make_symlink
from shared.mrslparser import expand_variables
from shared.ssh import copy_file_to_resource, generate_ssh_rsa_key_pair
try:
import shared.mrsltoxrsl as mrsltoxrsl
import shared.arcwrapper as arc
except Exception, exc:
# Ignore errors and let it crash if ARC is enabled without the lib
pass
def create_empty_job(
unique_resource_name,
exe,
request_cputime,
sleep_factor,
localjobname,
execution_delay,
configuration,
logger,
):
"""Helper to create empty job for idle resources"""
job_dict = {'': ''}
helper_dict_filename = os.path.join(configuration.resource_home,
unique_resource_name,
'empty_job_helper_dict.%s' % exe)
max_cputime = int(request_cputime)
scaled_cputime = int(float(configuration.cputime_for_empty_jobs)
* sleep_factor)
if scaled_cputime > max_cputime:
cputime = max_cputime
sleep_time = int(0.8 * cputime)
else:
cputime = scaled_cputime
sleep_time = \
int(float(configuration.sleep_period_for_empty_jobs)
* sleep_factor)
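# Worked example (illustrative numbers, not from a real configuration): with
# cputime_for_empty_jobs = 100, sleep_period_for_empty_jobs = 80 and
# sleep_factor = 2.0, scaled_cputime = int(100 * 2.0) = 200. If the resource
# requested max_cputime = 150, we clamp cputime to 150 and sleep for
# int(0.8 * 150) = 120 seconds; otherwise cputime stays at 200 and the sleep
# period is int(80 * 2.0) = 160 seconds.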
logger.info(
'request_cputime: %d, sleep_factor: %.1f, cputime: %d, sleep time: %d',
max_cputime, sleep_factor, cputime, sleep_time)
job_id = configuration.empty_job_name + '.' + unique_resource_name\
+ '.' + exe + '.' + localjobname
job_dict['JOB_ID'] = job_id
# sessionid = configuration.empty_job_name
sleep_cmd = 'sleep ' + str(sleep_time)
job_dict['EXECUTE'] = [sleep_cmd]
job_dict['INPUTFILES'] = []
job_dict['OUTPUTFILES'] = ''
job_dict['ARGUMENTS'] = ''
job_dict['EXECUTABLES'] = ''
job_dict['MOUNT'] = []
job_dict['CPUTIME'] = str(cputime)
job_dict['MEMORY'] = 16
job_dict['DISK'] = 1
job_dict['EXECUTION_DELAY'] = str(execution_delay)
job_dict['ENVIRONMENT'] = ''
job_dict['RUNTIMEENVIRONMENT'] = []
job_dict['MAXPRICE'] = '0'
job_dict['JOBNAME'] = 'empty job'
client_id = configuration.empty_job_name
job_dict['USER_CERT'] = client_id
# create mRSL file only containing the unique_resource_name.
# This is used when the .status file from the empty job is
# uploaded, to find the unique name of the resource to be able
# to start the exe again if continuous is True
# if not os.path.isfile(helper_dict_filename):
helper_dict = {}
helper_dict['JOB_ID'] = job_id
helper_dict['UNIQUE_RESOURCE_NAME'] = unique_resource_name
helper_dict['EXE'] = exe
helper_dict['IS_EMPTY_JOB_HELPER_DICT'] = True
helper_dict['LOCALJOBNAME'] = localjobname
pickle(helper_dict, helper_dict_filename, logger)
return (job_dict, 'OK')
def create_restart_job(
unique_resource_name,
exe,
request_cputime,
sleep_factor,
localjobname,
execution_delay,
configuration,
logger,
):
"""Wrapper to create a dummy job for forcing repeated restart of dead
exes.
"""
empty_job, _ = create_empty_job(
unique_resource_name,
exe,
request_cputime,
sleep_factor,
localjobname,
execution_delay,
configuration,
logger,
)
empty_job['UNIQUE_RESOURCE_NAME'] = unique_resource_name
empty_job['EXE'] = exe
empty_job['LOCALJOBNAME'] = localjobname
empty_job['STATUS'] = 'Restart exe failed dummy'
empty_job['EXECUTING_TIMESTAMP'] = time.gmtime()
empty_job['RESOURCE_CONFIG'] = None
empty_job['SESSIONID'] = 'RESTARTFAILEDDUMMYID'
empty_job['IOSESSIONID'] = 'RESTARTFAILEDDUMMYID'
empty_job['EMPTY_JOB'] = True
return (empty_job, 'OK')
def create_job_script(
unique_resource_name,
exe,
job,
resource_config,
localjobname,
configuration,
logger,
):
"""Helper to create actual jobs for handout to a resource.
Returns a (job dict, 'OK') tuple on success and (None, error message) otherwise.
The job dict includes a randomly generated session ID and an I/O session ID.
"""
job_dict = {'': ''}
sessionid = hexlify(open('/dev/urandom').read(32))
iosessionid = hexlify(open('/dev/urandom').read(32))
helper_dict_filename = os.path.join(configuration.resource_home,
unique_resource_name,
'empty_job_helper_dict.%s' % exe)
# Deep copy job for local changes
job_dict = deepcopy(job)
job_dict['SESSIONID'] = sessionid
job_dict['IOSESSIONID'] = iosessionid
# Create ssh rsa keys and known_hosts for job mount
mount_private_key = ""
mount_public_key = ""
mount_known_hosts = ""
if job_dict.get('MOUNT', []) != []:
# Generate public/private key pair for sshfs
(mount_private_key, mount_public_key) = generate_ssh_rsa_key_pair()
# Generate known_hosts
if not os.path.exists(configuration.user_sftp_key_pub):
msg = "job generation failed:"
msg = "%s user_sftp_key_pub: '%s' -> File _NOT_ found" % \
(msg, configuration.user_sftp_key_pub)
print msg
logger.error(msg)
return (None, msg)
sftp_address = configuration.user_sftp_show_address
sftp_addresses = socket.gethostbyname_ex(sftp_address or \
socket.getfqdn())
sftp_port = configuration.user_sftp_show_port
mount_known_hosts = "%s,[%s]:%s" % (sftp_addresses[0],
sftp_addresses[0], sftp_port)
for list_idx in xrange(1, len(sftp_addresses)):
for sftp_address in sftp_addresses[list_idx]:
mount_known_hosts += ",%s,[%s]:%s" % (sftp_address,
sftp_address,
sftp_port)
fd = open(configuration.user_sftp_key_pub, 'r')
mount_known_hosts = "%s %s" % (mount_known_hosts, fd.read())
fd.close()
job_dict['MOUNTSSHPUBLICKEY'] = mount_public_key
job_dict['MOUNTSSHPRIVATEKEY'] = mount_private_key
job_dict['MOUNTSSHKNOWNHOSTS'] = mount_known_hosts
if not job_dict.has_key('MAXPRICE'):
job_dict['MAXPRICE'] = '0'
# Finally expand reserved job variables like +JOBID+ and +JOBNAME+
job_dict = expand_variables(job_dict)
# ... no more changes to job_dict from here on
client_id = str(job_dict['USER_CERT'])
client_dir = client_id_dir(client_id)
# if not job:
if client_id == configuration.empty_job_name:
# create link to empty job
linkdest_empty_job = helper_dict_filename
linkloc_empty_job = configuration.sessid_to_mrsl_link_home\
+ sessionid + '.mRSL'
make_symlink(linkdest_empty_job, linkloc_empty_job, logger)
else:
# link sessionid to mrsl file
linkdest1 = configuration.mrsl_files_dir + client_dir + '/'\
+ str(job_dict['JOB_ID']) + '.mRSL'
linkloc1 = configuration.sessid_to_mrsl_link_home + sessionid\
+ '.mRSL'
make_symlink(linkdest1, linkloc1, logger)
# link sessionid to job owners home directory
linkdest2 = configuration.user_home + client_dir
linkloc2 = configuration.webserver_home + sessionid
make_symlink(linkdest2, linkloc2, logger)
# link iosessionid to job owners home directory
linkdest3 = configuration.user_home + client_dir
linkloc3 = configuration.webserver_home + iosessionid
make_symlink(linkdest3, linkloc3, logger)
# link sessionid to .job file
linkdest4 = configuration.mig_system_files + str(job_dict['JOB_ID'])\
+ '.job'
linkloc4 = configuration.webserver_home + sessionid + '.job'
make_symlink(linkdest4, linkloc4, logger)
# link sessionid to .getupdatefiles file
linkdest5 = configuration.mig_system_files + str(job_dict['JOB_ID'])\
+ '.getupdatefiles'
linkloc5 = configuration.webserver_home + sessionid\
+ '.getupdatefiles'
make_symlink(linkdest5, linkloc5, logger)
# link sessionid to .sendoutputfiles file
linkdest4 = configuration.mig_system_files + str(job_dict['JOB_ID'])\
+ '.sendoutputfiles'
linkloc4 = configuration.webserver_home + sessionid\
+ '.sendoutputfiles'
make_symlink(linkdest4, linkloc4, logger)
# link sessionid to .sendupdatefiles file
linkdest5 = configuration.mig_system_files + str(job_dict['JOB_ID'])\
+ '.sendupdatefiles'
linkloc5 = configuration.webserver_home + sessionid\
+ '.sendupdatefiles'
make_symlink(linkdest5, linkloc5, logger)
path_without_extension = os.path.join(configuration.resource_home,
unique_resource_name, localjobname)
gen_res = gen_job_script(
job_dict,
resource_config,
configuration,
localjobname,
path_without_extension,
client_dir,
exe,
logger,
)
if not gen_res:
msg = \
'job scripts were not generated. Perhaps you have specified ' + \
'an invalid SCRIPTLANGUAGE?'
print msg
logger.error(msg)
return (None, msg)
inputfiles_path = path_without_extension + '.getinputfiles'
# hack to ensure that a resource has a sandbox keyword
if resource_config.get('SANDBOX', False):
# Move file to webserver_home for download as we can't push it to
# sandboxes
try:
# RA TODO: change download filename to something that
# includes sessionid
webserver_path = os.path.join(configuration.webserver_home,
localjobname + '.getinputfiles')
os.rename(inputfiles_path, webserver_path)
# ########## ATTENTION HACK TO MAKE JVM SANDBOXES WORK ############
# This should be changed to use the (to be developed) RE pre/post
# processing framework. For now the user must have a jvm dir in his
# home dir where the class files are located; this should be changed
# so that the execution home path can be specified in the mRSL
# job file
# Martin Rehr 08/09/06
# If this is a oneclick job link the users jvm dir to
# webserver_home/sandboxkey.oneclick
# This is done because the client applet uses the
# codebase from which it is originally loaded.
# Therefore the codebase must be dynamically changed
# for every job
if resource_config.has_key('PLATFORM')\
and resource_config['PLATFORM'] == 'ONE-CLICK':
# A two step link is made.
# First sandboxkey.oneclick is made to point to
# sessiondid.jvm
# Second sessionid.jvm is set to point to
# USER_HOME/jvm
# This is done for security and easy cleanup,
# sessionid.jvm is cleaned up
# by the server upon job finish/timeout,
# thereby leaving no open entries to the user's
# jvm dir.
linkintermediate = configuration.webserver_home\
+ sessionid + '.jvm'
if client_dir == configuration.empty_job_name:
linkdest = \
os.path.abspath(configuration.javabin_home)
else:
linkdest = configuration.user_home + client_dir\
+ os.sep + 'jvm'
# Make link sessionid.jvm -> USER_HOME/jvm
make_symlink(linkdest, linkintermediate, logger)
linkloc = configuration.webserver_home\
+ resource_config['SANDBOXKEY'] + '.oneclick'
# Remove previous symlink
# This must be done in a try/except as the symlink
# may be a dead link and 'if os.path.exists(linkloc):'
# will then return false, even though the link exists.
try:
os.remove(linkloc)
except:
pass
# Make link sandboxkey.oneclick -> sessionid.jvm
make_symlink(linkintermediate, linkloc, logger)
except Exception, err:
# ######### End JVM SANDBOX HACK ###########
msg = "File '%s' was not copied to the webserver home."\
% inputfiles_path
print '\nERROR: ' + str(err)
logger.error(msg)
return (None, msg)
return (job_dict, 'OK')
# Copy file to the resource
if not copy_file_to_resource(inputfiles_path,
os.path.basename(inputfiles_path),
resource_config, logger):
logger.error('File was not copied to the resource: '
+ inputfiles_path)
else:
# file was sent, delete it
try:
os.remove(inputfiles_path)
except:
logger.error('could not remove ' + inputfiles_path)
return (job_dict, 'OK')
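# Example (sketch; the arguments are placeholders for values normally taken
# from the grid daemon state):
#
#     (job_dict, msg) = create_job_script('myres.0', 'exe0', job,
#                                         resource_config, 'localjob42',
#                                         configuration, logger)
#     if job_dict is None:
#         logger.error('job script generation failed: %s' % msg)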
def create_arc_job(
job,
configuration,
logger,
):
"""Analog to create_job_script for ARC jobs:
Creates symLinks for receiving result files, translates job dict to ARC
xrsl, and stores resulting job script (xrsl + sh script) for submitting.
Unlike create_job_script, we do _not_ create a separate job_dict with copies
and SESSIONID inside; all we need is the link from
webserver_home / sessionID into the user's home directory
("job_output/job['JOB_ID']" is added to the result upload URLs in the
translation).
Returns a (job dict, message) tuple; on success the dict carries the ARC job ID
and session ID, while (None, error message) is returned on failure.
"""
if not configuration.arc_clusters:
return (None, 'No ARC support!')
if not job['JOBTYPE'] == 'arc':
return (None, 'Error. This is not an ARC job')
# Deep copy job for local changes
job_dict = deepcopy(job)
# Finally expand reserved job variables like +JOBID+ and +JOBNAME+
job_dict = expand_variables(job_dict)
# ... no more changes to job_dict from here on
client_id = str(job_dict['USER_CERT'])
# we do not want to see empty jobs here. Test as done in create_job_script.
if client_id == configuration.empty_job_name:
return (None, 'Error. empty job for ARC?')
# generate random session ID:
sessionid = hexlify(open('/dev/urandom').read(32))
logger.debug('session ID (for creating links): %s' % sessionid)
client_dir = client_id_dir(client_id)
# make symbolic links inside webserver_home:
#
# we need: link to owner's dir. to receive results,
# job mRSL inside sessid_to_mrsl_link_home
linklist = [(configuration.user_home + client_dir,
configuration.webserver_home + sessionid),
(configuration.mrsl_files_dir + client_dir + '/' + \
str(job_dict['JOB_ID']) + '.mRSL',
configuration.sessid_to_mrsl_link_home + sessionid + '.mRSL')
]
for (dest, loc) in linklist:
make_symlink(dest, loc, logger)
# the translation generates an xRSL object which specifies to execute
# a shell script with script_name. If sessionid != None, results will
# be uploaded to sid_redirect/sessionid/job_output/job_id
try:
(xrsl, script, script_name) = mrsltoxrsl.translate(job_dict, sessionid)
logger.debug('translated to xRSL: %s' % xrsl)
logger.debug('script:\n %s' % script)
except Exception, err:
# error during translation, pass a message
logger.error('Error during xRSL translation: %s' % err.__str__())
return (None, err.__str__())
# we submit directly from here (the other version above does
# copy_file_to_resource, and gen_job_script generates all files)
# we have to put the generated script somewhere..., and submit from there.
# inputfiles are given by the user as relative paths from his home,
# so we should use that location (and clean up afterwards).
# write script (to user home)
user_home = os.path.join(configuration.user_home, client_dir)
script_path = os.path.abspath(os.path.join(user_home, script_name))
write_file(script, script_path, logger)
os.chdir(user_home)
try:
logger.debug('submitting job to ARC')
session = arc.Ui(user_home)
arc_job_ids = session.submit(xrsl)
# if no exception occurred, we are done:
job_dict['ARCID'] = arc_job_ids[0]
job_dict['SESSIONID'] = sessionid
msg = 'OK'
result = job_dict
# when errors occurred, pass a message to the caller.
except arc.ARCWrapperError, err:
msg = err.what()
result = None # unsuccessful
except arc.NoProxyError, err:
msg = 'No Proxy found: %s' % err.what()
result = None # unsuccessful
except Exception, err:
msg = err.__str__()
result = None # unsuccessful
# always remove the generated script
os.remove(script_name)
# and remove the created links immediately if failed
if not result:
for (_, link) in linklist:
os.remove(link)
logger.error('Unsuccessful ARC job submission: %s' % msg)
else:
logger.debug('submitted to ARC as job %s' % job_dict['ARCID'])
return (result, msg)
# errors are handled inside grid_script. For ARC jobs, set status = FAILED
# on errors, and include the message
# One potential error is that the proxy is invalid,
# which should be checked inside the parser, before informing
# grid_script about the new job.
def gen_job_script(
job_dictionary,
resource_config,
configuration,
localjobname,
path_without_extension,
client_dir,
exe,
logger,
):
"""Generate job script from job_dictionary before handout to resource"""
script_language = resource_config['SCRIPTLANGUAGE']
if script_language not in configuration.scriptlanguages:
print 'Unknown script language! (conflict with scriptlanguages in ' + \
'configuration?) %s not in %s' % (script_language,
configuration.scriptlanguages)
return False
if script_language == 'python':
generator = genjobscriptpython.GenJobScriptPython(
job_dictionary,
resource_config,
exe,
configuration.migserver_https_sid_url,
localjobname,
path_without_extension,
)
elif script_language == 'sh':
generator = genjobscriptsh.GenJobScriptSh(
job_dictionary,
resource_config,
exe,
configuration.migserver_https_sid_url,
localjobname,
path_without_extension,
)
elif script_language == 'java':
generator = genjobscriptjava.GenJobScriptJava(job_dictionary,
resource_config, configuration.migserver_https_sid_url,
localjobname, path_without_extension)
else:
print 'Unknown script language! (is in configuration but not in ' + \
'jobscriptgenerator) %s ' % script_language
return False
# String concatenation in python: [X].join is much faster
# than repeated use of s += strings
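# e.g. '\n'.join(lines) builds the final script in a single pass,
# whereas repeated s += line may recopy the accumulated string on
# every iteration (quadratic in the total length in the worst case)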
getinputfiles_array = []
getinputfiles_array.append(generator.script_init())
getinputfiles_array.append(generator.comment('print start'))
getinputfiles_array.append(generator.print_start('get input files'))
getinputfiles_array.append(generator.comment('init log'))
getinputfiles_array.append(generator.init_io_log())
getinputfiles_array.append(generator.comment('get special inputfiles'))
getinputfiles_array.append(generator.get_special_input_files(
    'get_special_status'))
getinputfiles_array.append(generator.log_io_status(
    'get_special_input_files', 'get_special_status'))
getinputfiles_array.append(generator.print_on_error(
    'get_special_status', '0', 'failed to fetch special input files!'))
getinputfiles_array.append(generator.comment('get input files'))
getinputfiles_array.append(generator.get_input_files('get_input_status'))
getinputfiles_array.append(generator.log_io_status(
    'get_input_files', 'get_input_status'))
getinputfiles_array.append(generator.print_on_error(
    'get_input_status', '0', 'failed to fetch input files!'))
getinputfiles_array.append(generator.comment('get executables'))
getinputfiles_array.append(generator.get_executables(
    'get_executables_status'))
getinputfiles_array.append(generator.log_io_status(
    'get_executables', 'get_executables_status'))
getinputfiles_array.append(generator.print_on_error(
    'get_executables_status', '0', 'failed to fetch executable files!'))
# client_dir equals empty_job_name for sleep jobs
getinputfiles_array.append(generator.generate_output_filelists(
client_dir != configuration.empty_job_name,
'generate_output_filelists'))
getinputfiles_array.append(generator.print_on_error(
'generate_output_filelists', '0',
'failed to generate output filelists!'))
getinputfiles_array.append(generator.generate_input_filelist(
'generate_input_filelist'))
getinputfiles_array.append(generator.print_on_error(
'generate_input_filelist', '0', 'failed to generate input filelist!'))
getinputfiles_array.append(generator.generate_iosessionid_file(
'generate_iosessionid_file'))
getinputfiles_array.append(generator.print_on_error(
'generate_iosessionid_file', '0',
'failed to generate iosessionid file!'))
getinputfiles_array.append(generator.generate_mountsshprivatekey_file(
'generate_mountsshprivatekey_file'))
getinputfiles_array.append(generator.print_on_error(
'generate_mountsshprivatekey_file', '0',
'failed to generate mountsshprivatekey file!'))
getinputfiles_array.append(generator.generate_mountsshknownhosts_file(
'generate_mountsshknownhosts_file'))
getinputfiles_array.append(generator.print_on_error(
'generate_mountsshknownhosts_file', '0',
'failed to generate mountsshknownhosts file!'))
getinputfiles_array.append(generator.total_status(
['get_special_status', 'get_input_status', 'get_executables_status',
'generate_output_filelists'], 'total_status'))
getinputfiles_array.append(generator.exit_on_error('total_status',
'0', 'total_status'))
getinputfiles_array.append(generator.comment('exit script'))
getinputfiles_array.append(generator.exit_script('0', 'get input files'))
job_array = []
job_array.append(generator.script_init())
job_array.append(generator.set_core_environments())
job_array.append(generator.print_start('job'))
job_array.append(generator.comment('TODO: switch to job directory here'))
job_array.append(generator.comment('make sure job status files exist'))
job_array.append(generator.create_files(
    [job_dictionary['JOB_ID'] + '.stdout',
     job_dictionary['JOB_ID'] + '.stderr',
     job_dictionary['JOB_ID'] + '.status']))
job_array.append(generator.init_status())
job_array.append(generator.comment('chmod +x'))
job_array.append(generator.chmod_executables('chmod_status'))
job_array.append(generator.print_on_error(
'chmod_status', '0',
'failed to make one or more EXECUTABLES executable'))
job_array.append(generator.log_on_error('chmod_status', '0',
'system: chmod'))
job_array.append(generator.comment('set environments'))
job_array.append(generator.set_environments('env_status'))
job_array.append(generator.print_on_error(
'env_status', '0', 'failed to initialize one or more ENVIRONMENTs'))
job_array.append(generator.log_on_error('env_status', '0',
'system: set environments'))
job_array.append(generator.comment('set runtimeenvironments'))
job_array.append(generator.set_runtime_environments(
resource_config['RUNTIMEENVIRONMENT'], 're_status'))
job_array.append(generator.print_on_error(
're_status', '0',
'failed to initialize one or more RUNTIMEENVIRONMENTs'))
job_array.append(generator.log_on_error('re_status', '0',
'system: set RUNTIMEENVIRONMENTs'))
job_array.append(generator.comment('enforce some basic job limits'))
job_array.append(generator.set_limits())
if job_dictionary.get('MOUNT', []) != []:
job_array.append(generator.comment('Mount job home'))
job_array.append(generator.mount(job_dictionary['SESSIONID'],
configuration.user_sftp_show_address,
configuration.user_sftp_show_port,
'mount_status'))
job_array.append(generator.print_on_error('mount_status', '0',
    'failed to mount job home'))
job_array.append(generator.log_on_error('mount_status', '0',
'system: mount'))
job_array.append(generator.comment('execute!'))
job_array.append(generator.execute('EXECUTING: ', '--Exit code:'))
if job_dictionary.get('MOUNT', []) != []:
job_array.append(generator.comment('Unmount job home'))
job_array.append(generator.umount('umount_status'))
job_array.append(generator.print_on_error(
    'umount_status', '0', 'failed to umount job home'))
job_array.append(generator.log_on_error('umount_status', '0',
'system: umount'))
job_array.append(generator.comment('exit script'))
job_array.append(generator.exit_script('0', 'job'))
getupdatefiles_array = []
# We need to make sure that curl failures lead to retry while
# missing output (from say a failed job) is logged but
# ignored in relation to getupdatefiles success.
getupdatefiles_array.append(generator.print_start('get update files'))
getupdatefiles_array.append(generator.init_io_log())
getupdatefiles_array.append(generator.comment('get io files'))
getupdatefiles_array.append(generator.get_io_files('get_io_status'))
getupdatefiles_array.append(generator.log_io_status(
    'get_io_files', 'get_io_status'))
getupdatefiles_array.append(generator.print_on_error(
'get_io_status', '0', 'failed to get one or more IO files'))
getupdatefiles_array.append(generator.exit_on_error(
'get_io_status', '0', 'get_io_status'))
getupdatefiles_array.append(generator.comment('exit script'))
getupdatefiles_array.append(generator.exit_script('0', 'get update files'))
sendoutputfiles_array = []
# We need to make sure that curl failures lead to retry while
# missing output (from say a failed job) is logged but
# ignored in relation to sendoutputfiles success.
sendoutputfiles_array.append(generator.print_start('send output files'))
sendoutputfiles_array.append(generator.init_io_log())
sendoutputfiles_array.append(generator.comment('check output files'))
sendoutputfiles_array.append(generator.output_files_missing(
'missing_counter'))
sendoutputfiles_array.append(generator.log_io_status(
'output_files_missing', 'missing_counter'))
sendoutputfiles_array.append(generator.print_on_error(
'missing_counter', '0', 'missing output files'))
sendoutputfiles_array.append(generator.comment('send output files'))
sendoutputfiles_array.append(generator.send_output_files(
'send_output_status'))
sendoutputfiles_array.append(generator.log_io_status('send_output_files',
'send_output_status'))
sendoutputfiles_array.append(generator.print_on_error(
'send_output_status', '0', 'failed to send one or more outputfiles'))
sendoutputfiles_array.append(generator.exit_on_error(
'send_output_status', '0', 'send_output_status'))
sendoutputfiles_array.append(generator.comment('send io files'))
sendoutputfiles_array.append(generator.send_io_files('send_io_status'))
sendoutputfiles_array.append(generator.log_io_status(
    'send_io_files', 'send_io_status'))
sendoutputfiles_array.append(generator.print_on_error(
'send_io_status', '0', 'failed to send one or more IO files'))
sendoutputfiles_array.append(generator.exit_on_error(
'send_io_status', '0', 'send_io_status'))
sendoutputfiles_array.append(generator.comment('send status files'))
sendoutputfiles_array.append(generator.send_status_files(
[job_dictionary['JOB_ID'] + '.io-status'], 'send_io_status_status'))
sendoutputfiles_array.append(generator.print_on_error(
'send_io_status_status', '0', 'failed to send io-status file'))
sendoutputfiles_array.append(generator.exit_on_error(
'send_io_status_status', '0', 'send_io_status_status'))
# Please note that .status upload marks the end of the
# session and thus it must be the last uploaded file.
sendoutputfiles_array.append(generator.send_status_files(
[job_dictionary['JOB_ID'] + '.status'], 'send_status_status'))
sendoutputfiles_array.append(generator.print_on_error(
'send_status_status', '0', 'failed to send status file'))
sendoutputfiles_array.append(generator.exit_on_error(
'send_status_status', '0', 'send_status_status'))
# Note that ID.sendoutputfiles is called from frontend_script
# so exit on failure can be handled there.
sendoutputfiles_array.append(generator.comment('exit script'))
sendoutputfiles_array.append(generator.exit_script('0',
'send output files'))
sendupdatefiles_array = []
# We need to make sure that curl failures lead to retry while
# missing output (from say a failed job) is logged but
# ignored in relation to sendupdatefiles success.
sendupdatefiles_array.append(generator.print_start('send update files'))
sendupdatefiles_array.append(generator.init_io_log())
sendupdatefiles_array.append(generator.comment('send io files'))
sendupdatefiles_array.append(generator.send_io_files('send_io_status'))
sendupdatefiles_array.append(generator.log_io_status(
    'send_io_files', 'send_io_status'))
sendupdatefiles_array.append(generator.print_on_error(
'send_io_status', '0', 'failed to send one or more IO files'))
sendupdatefiles_array.append(generator.exit_on_error(
'send_io_status', '0', 'send_io_status'))
sendupdatefiles_array.append(generator.comment('exit script'))
sendupdatefiles_array.append(generator.exit_script('0',
'send update files'))
# clean up must be done with SSH (when the .status file
# has been uploaded): Job script can't safely/reliably clean up
# after itself because of possible user interference.
if job_dictionary.has_key('JOBTYPE') and \
        job_dictionary['JOBTYPE'].lower() == 'interactive':
# Interactive jobs have a .job file just containing a curl
# call to the MiG server's cgi-sid/requestinteractivejob.
# The usual .job is instead called .interactivejob and
# is SCP'ed and started by SSH in the requestinteractive.py
# script.
logger.info('jobtype: interactive')
interactivejobfile = generator.script_init() + '\n'\
+ generator.request_interactive() + '\n'\
+ generator.exit_script('0', 'interactive job')
# write the small file containing the requestinteractivejob.py
# call as .job
write_file(interactivejobfile, configuration.mig_system_files
+ job_dictionary['JOB_ID'] + '.job', logger)
# write the usual .job file as .interactivejob
write_file('\n'.join(job_array), configuration.mig_system_files
+ job_dictionary['JOB_ID'] + '.interactivejob',
logger)
print interactivejobfile
else:
# write files
write_file('\n'.join(job_array), configuration.mig_system_files
+ job_dictionary['JOB_ID'] + '.job', logger)
write_file('\n'.join(getinputfiles_array), path_without_extension
+ '.getinputfiles', logger)
write_file('\n'.join(getupdatefiles_array),
configuration.mig_system_files + job_dictionary['JOB_ID']
+ '.getupdatefiles', logger)
write_file('\n'.join(sendoutputfiles_array),
configuration.mig_system_files + job_dictionary['JOB_ID']
+ '.sendoutputfiles', logger)
write_file('\n'.join(sendupdatefiles_array),
configuration.mig_system_files + job_dictionary['JOB_ID']
+ '.sendupdatefiles', logger)
return True
|
heromod/migrid
|
mig/server/jobscriptgenerator.py
|
Python
|
gpl-2.0
| 35,103
|
[
"Brian"
] |
8dd460e28e04ac3dbfa58b08190c106f6b2b510549586ab4aab8dda3a05e6f5d
|
"""Various tools for surface desciption"""
import numpy as np
def spectrum(wk, sh, cl, th, n=1, kind='isotropic gaussian', **kwargs):
"""Surface roughness spectrum at at order n
"""
if kind == 'isotropic gaussian':
out = np.float64(cl)**2 / n * np.exp(-(wk * cl * np.sin(th))**2 / n) / 2.
else:
raise ValueError("Unsupported value for kind: " + kind)
return out
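# Illustrative usage sketch (an addition, not part of the original module).
# Parameter meanings are assumed here: wk as wavenumber [rad/m], cl as
# correlation length [m], th as incidence angle [rad], and sh as rms height
# [m] (sh is currently unused by spectrum()); the numbers are arbitrary.
if __name__ == '__main__':
    wk = 2 * np.pi / 0.1  # assumed 10 cm wavelength
    for order in (1, 2, 3):
        val = spectrum(wk, sh=0.05, cl=1.0, th=np.radians(30.), n=order)
        print('n=%d: %e' % (order, val))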
|
cgrima/subradar
|
subradar/roughness.py
|
Python
|
mit
| 391
|
[
"Gaussian"
] |
381c48accee411d841f8bcd12dae60280f939415fae3f3c5c9e2d13bfc3176e5
|
"""deployment scripts for researchcompendia
with credit to scipy-2014 fabric.py
fab <dev|staging|prod|vagrant> <deploy|provision>[:<git ref>]
deploy: deploys the site to the specified environment. if no git ref is provided, deploys HEAD
provision: provisions a box to run the site. is not idempotent. do not rerun.
git ref: a git branch, hash, tag
example usages:
deploy deploys a new version of the site with a new virtualenv. it starts by
placing a maintenance page, stops the website, updates it to version 1.1.1,
restarts it, and brings back the main page.
$ fab vagrant deploy:1.1.1
the following combination of calls does the previous steps by hand except for
creating a new virtualenv.
$ fab vagrant dust
$ fab vagrant stop:researchcompendia
$ fab vagrant update:1.1.1
$ fab vagrant start:researchcompendia
$ fab vagrant undust
provision: completely provisions a new box. You should be able to run this and
visit the box afterwards to see the site. If you are using the vagrant
environment, visit http://127.0.0.1:8000
$ fab vagrant provision:1.1.1
"""
import datetime, string, random, re
from os.path import join, dirname, abspath
import fabric.api
from fabric.api import run, task, env, cd, sudo, local, put
from fabric.contrib.files import sed, append
from fabtools import require, supervisor, postgres, deb, files
from fabtools.files import upload_template
from fabtools.user import home_directory
import fabtools
env.disable_known_hosts = True
SITE_USER = 'tyler'
SITE_GROUP = 'tyler'
SITE_NAME = 'tyler'
SITE_REPO = 'git://github.com/researchcompendia/researchcompendia.git'
FAB_HOME = dirname(abspath(__file__))
TEMPLATE_DIR = join(FAB_HOME, 'templates')
@task
def dev():
env.update({
'carbon': '10.176.162.45',
'site': '.codersquid.com',
'available': 'researchcompendia',
'hosts': ['67.207.156.211:2222'],
'site_environment': 'dev_environment.sh',
})
@task
def staging():
env.update({
'carbon': '10.176.162.45',
'site': 'labs.researchcompendia.org',
'available': 'researchcompendia',
'hosts': ['labs.researchcompendia.org:2222'],
'site_environment': 'staging_environment.sh',
})
@task
def prod():
env.update({
'carbon': '10.176.162.45',
'site': 'researchcompendia.org',
'available': 'researchcompendia',
'hosts': ['researchcompendia.org:2222'],
'site_environment': 'prod_environment.sh',
})
@task
def vagrant():
env.update({
'carbon': '10.176.162.45',
'user': 'vagrant',
'site': 'localhost',
'available': 'researchcompendia',
'hosts': ['127.0.0.1:2222'],
'site_environment': 'dev_environment.sh',
'key_filename': local('vagrant ssh-config | grep IdentityFile | cut -f4 -d " "', capture=True),
})
@task
def uname():
"""the hello world of fabric
"""
fabric.api.require('site', 'available', 'hosts', 'site_environment',
provided_by=('dev', 'staging', 'prod', 'vagrant'))
run('uname -a')
@task
def deploy(version_tag=None):
"""deploys a new version of the site with a new virtualenv
version_tag: a git tag, defaults to HEAD
"""
fabric.api.require('site', 'available', 'hosts', 'site_environment',
provided_by=('dev', 'staging', 'prod', 'vagrant'))
dust()
stop('researchcompendia')
new_env = virtualenv_name(commit=version_tag)
mkvirtualenv(new_env)
update_site_version(new_env)
update(commit=version_tag)
install_site_requirements(new_env)
#collectstatic()
start('researchcompendia')
undust()
@task
def stop(process_name):
"""stops supervisor process
"""
fabric.api.require('site', 'available', 'hosts', 'site_environment',
provided_by=('dev', 'staging', 'prod', 'vagrant'))
supervisor.stop_process(process_name)
@task
def start(process_name):
"""starts supervisor process
"""
fabric.api.require('site', 'available', 'hosts', 'site_environment',
provided_by=('dev', 'staging', 'prod', 'vagrant'))
supervisor.start_process(process_name)
@task
def update(commit=None):
site_root = join(home_directory(SITE_USER), 'site')
repodir = join(site_root, SITE_NAME)
if not files.is_dir(repodir):
with cd(site_root):
su('git clone %s %s' % (SITE_REPO, SITE_NAME))
with cd(repodir):
su('git fetch')
if commit is None:
commit = 'origin/master'
su('git checkout %s' % commit)
@task
def migrate(app):
""" run south migration on specified app
"""
environment = join(home_directory(SITE_USER), 'site/bin/environment.sh')
djangodir = join(home_directory(SITE_USER), 'site', SITE_NAME, 'companionpages')
with cd(djangodir):
vsu('source %s; ./manage.py migrate %s' % (environment, app))
@task
def dust():
"""Take down the researchcompendia site and enable the maintenance site
"""
require.nginx.disabled('researchcompendia')
require.nginx.enabled('maintenance')
sudo('service nginx restart')
@task
def undust():
"""Brings back the site
"""
fabric.api.require('site', 'available', 'hosts', 'site_environment',
provided_by=('dev', 'staging', 'prod', 'vagrant'))
require.nginx.disabled('maintenance')
require.nginx.enabled('researchcompendia')
sudo('service nginx restart')
@task
def provision(version_tag=None, everything=False):
"""Run only once to provision a new host.
This is not idempotent. Only run once!
version_tag: branch of git repo to use
everything: whether or not to rabbitmq and other services
"""
fabric.api.require('site', 'available', 'hosts', 'site_environment',
provided_by=('dev', 'staging', 'prod', 'vagrant'))
install_dependencies()
lockdown_nginx()
lockdown_ssh()
setup_database()
setup_site_user()
setup_site_root()
setup_envvars()
update(version_tag)
setup_django(version_tag)
setup_nginx()
setup_supervisor()
if everything:
crontab_download_checker()
setup_rabbitmq()
setup_elasticsearch()
crontab_update_index()
def setup_collectd():
""" installs collectd and configures it to talk to graphite
"""
require.deb.packages(['collectd',])
hostname = run('hostname')
upload_template('collectd.conf', '/etc/collectd/collectd.conf', use_jinja=True,
context={'carbonhost': env.carbon, 'hostname': hostname},
template_dir=TEMPLATE_DIR, use_sudo=True)
sudo('/etc/init.d/collectd restart')
def setup_rabbitmq(user=None):
""" installs official rabbitmq package and sets up user
"""
deb.add_apt_key(url='http://www.rabbitmq.com/rabbitmq-signing-key-public.asc')
require.deb.source('rabbitmq-server', 'http://www.rabbitmq.com/debian/', 'testing', 'main')
require.deb.uptodate_index(max_age={'hour': 1})
require.deb.packages(['rabbitmq-server',])
if user is None:
user = SITE_USER
envfile = join(home_directory(SITE_USER), 'site/bin/environment.sh')
secret = randomstring(64)
sudo('rabbitmqctl delete_user guest')
sudo('rabbitmqctl add_user %s "%s"' % (user, secret))
sudo('rabbitmqctl set_permissions -p / %s ".*" ".*" ".*"' % user)
amqp_url = 'amqp://%s:%s@localhost:5672//' % (SITE_USER, secret)
sed(envfile, 'DJANGO_BROKER_URL=".*"', 'DJANGO_BROKER_URL="%s"' % amqp_url, use_sudo=True)
def setup_elasticsearch():
deb.add_apt_key(url='http://packages.elasticsearch.org/GPG-KEY-elasticsearch')
require.deb.source('elasticsearch', 'http://packages.elasticsearch.org/elasticsearch/1.1/debian', 'stable', 'main')
require.deb.uptodate_index(max_age={'hour': 1})
require.deb.packages(['elasticsearch',])
sudo('update-rc.d elasticsearch defaults 95 10')
def setup_nginx():
site_root = join(home_directory(SITE_USER), 'site')
upload_template('researchcompendia_nginx',
'/etc/nginx/sites-available/researchcompendia',
context={
'server_name': env.site,
'access_log': join(site_root, 'logs', 'access.log'),
'error_log': join(site_root, 'logs', 'error.log'),
'static_location': join(site_root, 'static/'),
'media_location': join(site_root, 'media/'),
},
use_jinja=True, use_sudo=True, template_dir=TEMPLATE_DIR)
require.nginx.enabled('researchcompendia')
require.nginx.disabled('default')
put(template_path('maintenance_nginx'), '/etc/nginx/sites-available/maintenance', use_sudo=True)
put(template_path('maintenance_index.html'), '/usr/share/nginx/www/index.html', use_sudo=True)
def setup_supervisor():
site_root = join(home_directory(SITE_USER), 'site')
upload_template('researchcompendia.conf',
'/etc/supervisor/conf.d/researchcompendia_web.conf',
context={
'command': join(site_root, 'bin', 'runserver.sh'),
'user': SITE_USER,
'group': SITE_GROUP,
'logfile': join(site_root, 'logs', 'gunicorn_supervisor.log'),
},
use_jinja=True, use_sudo=True, template_dir=TEMPLATE_DIR)
upload_template('celeryd.conf',
'/etc/supervisor/conf.d/celeryd.conf',
context={
'command': join(site_root, 'bin', 'celeryworker.sh'),
'user': SITE_USER,
'group': SITE_GROUP,
'logfile': join(site_root, 'logs', 'celery_worker.log'),
},
use_jinja=True, use_sudo=True, template_dir=TEMPLATE_DIR)
supervisor.update_config()
def lockdown_nginx():
# don't share nginx version in header and error pages
sed('/etc/nginx/nginx.conf', '# server_tokens off;', 'server_tokens off;', use_sudo=True)
sudo('service nginx restart')
def lockdown_ssh():
sed('/etc/ssh/sshd_config', '^#PasswordAuthentication yes', 'PasswordAuthentication no', use_sudo=True)
append('/etc/ssh/sshd_config', ['UseDNS no', 'PermitRootLogin no', 'DebianBanner no', 'TcpKeepAlive yes'], use_sudo=True)
sudo('service ssh restart')
def setup_django(version_tag):
virtualenv = virtualenv_name(commit=version_tag)
mkvirtualenv(virtualenv)
update_site_version(virtualenv)
install_site_requirements(virtualenv)
collectstatic()
syncdb()
load_fixtures()
def syncdb():
environment = join(home_directory(SITE_USER), 'site/bin/environment.sh')
djangodir = join(home_directory(SITE_USER), 'site', SITE_NAME, 'companionpages')
with cd(djangodir):
vsu('source %s; ./manage.py syncdb --noinput --migrate' % environment)
def load_fixtures():
environment = join(home_directory(SITE_USER), 'site/bin/environment.sh')
djangodir = join(home_directory(SITE_USER), 'site', SITE_NAME, 'companionpages')
with cd(djangodir):
vsu('source %s; ./manage.py loaddata fixtures/*' % environment)
def install_site_requirements(virtualenv):
home = home_directory(SITE_USER)
with cd(join(home, 'site', SITE_NAME)):
vsu('pip install -r requirements/production.txt', virtualenv=virtualenv)
def setup_database():
require.postgres.server()
# NOTE: fabtools.require.postgres.user did not allow creating a user without a password prompt
if not postgres.user_exists(SITE_USER):
su('createuser -S -D -R -w %s' % SITE_USER, 'postgres')
if not postgres.database_exists(SITE_USER):
require.postgres.database(SITE_USER, SITE_USER, encoding='UTF8', locale='en_US.UTF-8')
# change default port
# port = 5432
# /etc/postgresql/9.1/main/postgresql.conf
def setup_site_user():
if not fabtools.user.exists(SITE_USER):
sudo('useradd -s/bin/bash -d/home/%s -m %s' % (SITE_USER, SITE_USER))
def setup_envvars():
""" copies secrets from env files that are not checked in to the deployment repo
"""
env_template_dir = join(FAB_HOME, 'env')
secret = randomstring(64)
site_root = join(home_directory(SITE_USER), 'site')
bindir = join(site_root, 'bin')
static_root = join(site_root, 'static')
media_root = join(site_root, 'media')
destination_envfile = join(bindir, 'environment.sh')
with cd(bindir):
upload_template(env.site_environment, destination_envfile,
context={
'secret_key': secret,
'static_root': static_root,
'media_root': media_root,
},
template_dir=env_template_dir,
use_jinja=True, use_sudo=True, chown=True, user=SITE_USER)
def setup_site_root():
site_root = join(home_directory(SITE_USER), 'site')
bindir = join(site_root, 'bin')
with cd(home_directory(SITE_USER)):
su('mkdir -p venvs site')
with cd(site_root):
su('mkdir -p logs bin env media static')
put(template_path('runserver.sh'), bindir, use_sudo=True)
put(template_path('celeryworker.sh'), bindir, use_sudo=True)
put(template_path('check_downloads.sh'), bindir, use_sudo=True)
sudo('chown -R %s:%s %s' % (SITE_USER, SITE_USER, site_root))
with cd(bindir):
su('chmod +x runserver.sh celeryworker.sh check_downloads.sh')
def crontab_download_checker():
site_root = join(home_directory(SITE_USER), 'site')
bindir = join(site_root, 'bin')
job = join(home_directory(SITE_USER), 'check_downloads')
upload_template('check_downloads', job,
context={
'command': join(bindir, 'check_downloads.sh'),
'logfile': join(site_root, 'logs', 'cron_checkdownloads.log'),
},
use_jinja=True, use_sudo=True, template_dir=TEMPLATE_DIR)
sudo('chown %s:%s %s' % (SITE_USER, SITE_USER, job))
su('crontab %s' % job)
def crontab_update_index():
site_root = join(home_directory(SITE_USER), 'site')
bindir = join(site_root, 'bin')
job = join(home_directory(SITE_USER), 'update_index')
upload_template('update_index', job,
context={
'command': join(bindir, 'update_index.sh'),
'logfile': join(site_root, 'logs', 'cron_update_index.log'),
},
use_jinja=True, use_sudo=True, template_dir=TEMPLATE_DIR)
sudo('chown %s:%s %s' % (SITE_USER, SITE_USER, job))
su('crontab %s' % job)
def install_dependencies():
require.deb.uptodate_index(max_age={'hour': 1})
require.deb.packages([
'python-software-properties',
'python-dev',
'build-essential',
'python-pip',
'git',
'nginx-extras',
'libxslt1-dev',
'supervisor',
'postgresql',
'postgresql-server-dev-9.1',
'memcached',
'libmemcached-dev',
# fun stuff
'tig',
'vim',
'exuberant-ctags',
'multitail',
'curl',
'tmux',
'htop',
'ack-grep',
])
require.python.packages([
'virtualenvwrapper',
'setproctitle',
], use_sudo=True)
def install_python_packages():
sudo('wget https://bitbucket.org/pypa/setuptools/raw/bootstrap/ez_setup.py')
sudo('wget https://raw.github.com/pypa/pip/master/contrib/get-pip.py')
sudo('python ez_setup.py')
sudo('python get-pip.py')
# install global python packages
require.python.packages([
'virtualenvwrapper',
'setproctitle',
], use_sudo=True)
def su(cmd, user=None):
if user is None:
user = SITE_USER
sudo("su %s -c '%s'" % (user, cmd))
def vsu(cmd, virtualenv=None, user=None):
if virtualenv is None:
virtualenv = get_site_version()
if user is None:
user = SITE_USER
home = home_directory(user)
venvdir = join(home, 'venvs', virtualenv, 'bin/activate')
sudo("su %s -c 'source %s; %s'" % (user, venvdir, cmd))
def update_site_version(site_version):
envfile = join(home_directory(SITE_USER), 'site/bin/environment.sh')
sed(envfile, 'SITE_VERSION=".*"', 'SITE_VERSION="%s"' % site_version, use_sudo=True)
def collectstatic():
environment = join(home_directory(SITE_USER), 'site/bin/environment.sh')
djangodir = join(home_directory(SITE_USER), 'site', SITE_NAME, 'companionpages')
# ignoring logs and media objects in our s3 container
# this shouldn't be necessary but is a consequence of using django-storages with the boto backend
# and as of now, django-storages doesn't support separate containers for static and media.
with cd(djangodir):
vsu('source %s; ./manage.py collectstatic --noinput --clear --ignore *results* --ignore *log* --ignore *materials* --ignore *articles*' % environment)
def get_site_version():
site_line = run('grep SITE_VERSION %s' % join(home_directory(SITE_USER), 'site/bin/environment.sh'))
match = re.search(r'export SITE_VERSION="(?P<version>[^"]+)', site_line)
g = match.groupdict()
return g['version']
def randomstring(n):
return ''.join(random.choice(string.ascii_letters + string.digits + '~@#%^&*-_') for x in range(n))
def virtualenv_name(commit=None):
if commit is None:
repodir = join(home_directory(SITE_USER), 'site', SITE_NAME)
with cd(repodir):
commit = run('git rev-parse HEAD').strip()
timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
return '%s-%s' % (timestamp, commit.replace('/', '_'))
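# e.g. virtualenv_name(commit='1.1.1') -> '2014-06-20-15-04-05-1.1.1'
# (timestamp illustrative); with no commit given, the HEAD hash is used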
def template_path(filename):
return join(FAB_HOME, 'templates', filename)
def mkvirtualenv(virtualenv):
with cd(join(home_directory(SITE_USER), 'venvs')):
su('virtualenv %s' % virtualenv)
|
researchcompendia/researchcompendia-deployment
|
fabfile.py
|
Python
|
mit
| 17,507
|
[
"VisIt"
] |
dcb1ae527788bdab2df971e64154fdb4c1efe36f982b9838ba9511f4df74de2d
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Red Hat, Inc.
# Copyright (c) 2010 Ville Skyttä
# Copyright (c) 2009 Tim Lauridsen
# Copyright (c) 2007 Marcus Kuhn
#
# kitchen is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# kitchen is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with kitchen; if not, see <http://www.gnu.org/licenses/>
#
# Authors:
# James Antill <james@fedoraproject.org>
# Marcus Kuhn
# Toshio Kuratomi <toshio@fedoraproject.org>
# Tim Lauridsen
# Ville Skyttä
#
# Portions of this are from yum/i18n.py
'''
-----------------------
Format Text for Display
-----------------------
Functions related to displaying unicode text. Unicode characters don't all
have the same width so we need helper functions for displaying them.
.. versionadded:: 0.2 kitchen.display API 1.0.0
'''
import itertools
import unicodedata
from kitchen.text.converters import to_unicode, to_bytes
from kitchen.text.exceptions import ControlCharError
# This is ported from ustr_utf8_* which I got from:
# http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c
# I've tried to leave it close to the original C (same names etc.) so that
# it is easy to read/compare both versions... James Antill
#
# Reimplemented quite a bit of this for speed. Use the bzr log or annotate
# commands to see what I've changed since importing this file.-Toshio Kuratomi
# ----------------------------- BEG utf8 -----------------------------
# This is an implementation of wcwidth() and wcswidth() (defined in
# IEEE Std 1003.1-2001) for Unicode.
#
# http://www.opengroup.org/onlinepubs/007904975/functions/wcwidth.html
# http://www.opengroup.org/onlinepubs/007904975/functions/wcswidth.html
#
# In fixed-width output devices, Latin characters all occupy a single
# "cell" position of equal width, whereas ideographic CJK characters
# occupy two such cells. Interoperability between terminal-line
# applications and (teletype-style) character terminals using the
# UTF-8 encoding requires agreement on which character should advance
# the cursor by how many cell positions. No established formal
# standards exist at present on which Unicode character shall occupy
# how many cell positions on character terminals. These routines are
# a first attempt of defining such behavior based on simple rules
# applied to data provided by the Unicode Consortium.
#
# [...]
#
# Markus Kuhn -- 2007-05-26 (Unicode 5.0)
#
# Permission to use, copy, modify, and distribute this software
# for any purpose and without fee is hereby granted. The author
# disclaims all warranties with regard to this software.
#
# Latest version: http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c
# Renamed but still pretty much JA's port of MK's code
def _interval_bisearch(value, table):
'''Binary search in an interval table.
:arg value: numeric value to search for
:arg table: Ordered list of intervals. This is a list of two-tuples. The
elements of the two-tuple define an interval's start and end points.
:returns: If :attr:`value` is found within an interval in the :attr:`table`
return :data:`True`. Otherwise, :data:`False`
This function checks whether a numeric value is present within a table
of intervals. It checks using a binary search algorithm, dividing the
list of values in half and checking against the values until it determines
whether the value is in the table.
'''
minimum = 0
maximum = len(table) - 1
if value < table[minimum][0] or value > table[maximum][1]:
return False
while maximum >= minimum:
mid = divmod(minimum + maximum, 2)[0]
if value > table[mid][1]:
minimum = mid + 1
elif value < table[mid][0]:
maximum = mid - 1
else:
return True
return False
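# Illustrative check against an assumed one-interval table: a code point
# inside the interval is found, one outside is not:
# >>> _interval_bisearch(0x301, ((0x300, 0x36f),))
# True
# >>> _interval_bisearch(0x41, ((0x300, 0x36f),))
# False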
_COMBINING = (
(0x300, 0x36f), (0x483, 0x489), (0x591, 0x5bd),
(0x5bf, 0x5bf), (0x5c1, 0x5c2), (0x5c4, 0x5c5),
(0x5c7, 0x5c7), (0x600, 0x603), (0x610, 0x61a),
(0x64b, 0x65f), (0x670, 0x670), (0x6d6, 0x6e4),
(0x6e7, 0x6e8), (0x6ea, 0x6ed), (0x70f, 0x70f),
(0x711, 0x711), (0x730, 0x74a), (0x7a6, 0x7b0),
(0x7eb, 0x7f3), (0x816, 0x819), (0x81b, 0x823),
(0x825, 0x827), (0x829, 0x82d), (0x859, 0x85b),
(0x8d4, 0x8e1), (0x8e3, 0x8ff), (0x901, 0x902),
(0x93c, 0x93c), (0x941, 0x948), (0x94d, 0x94d),
(0x951, 0x954), (0x962, 0x963), (0x981, 0x981),
(0x9bc, 0x9bc), (0x9c1, 0x9c4), (0x9cd, 0x9cd),
(0x9e2, 0x9e3), (0xa01, 0xa02), (0xa3c, 0xa3c),
(0xa41, 0xa42), (0xa47, 0xa48), (0xa4b, 0xa4d),
(0xa70, 0xa71), (0xa81, 0xa82), (0xabc, 0xabc),
(0xac1, 0xac5), (0xac7, 0xac8), (0xacd, 0xacd),
(0xae2, 0xae3), (0xb01, 0xb01), (0xb3c, 0xb3c),
(0xb3f, 0xb3f), (0xb41, 0xb43), (0xb4d, 0xb4d),
(0xb56, 0xb56), (0xb82, 0xb82), (0xbc0, 0xbc0),
(0xbcd, 0xbcd), (0xc3e, 0xc40), (0xc46, 0xc48),
(0xc4a, 0xc4d), (0xc55, 0xc56), (0xcbc, 0xcbc),
(0xcbf, 0xcbf), (0xcc6, 0xcc6), (0xccc, 0xccd),
(0xce2, 0xce3), (0xd41, 0xd43), (0xd4d, 0xd4d),
(0xdca, 0xdca), (0xdd2, 0xdd4), (0xdd6, 0xdd6),
(0xe31, 0xe31), (0xe34, 0xe3a), (0xe47, 0xe4e),
(0xeb1, 0xeb1), (0xeb4, 0xeb9), (0xebb, 0xebc),
(0xec8, 0xecd), (0xf18, 0xf19), (0xf35, 0xf35),
(0xf37, 0xf37), (0xf39, 0xf39), (0xf71, 0xf7e),
(0xf80, 0xf84), (0xf86, 0xf87), (0xf90, 0xf97),
(0xf99, 0xfbc), (0xfc6, 0xfc6), (0x102d, 0x1030),
(0x1032, 0x1032), (0x1036, 0x1037), (0x1039, 0x103a),
(0x1058, 0x1059), (0x108d, 0x108d), (0x1160, 0x11ff),
(0x135d, 0x135f), (0x1712, 0x1714), (0x1732, 0x1734),
(0x1752, 0x1753), (0x1772, 0x1773), (0x17b4, 0x17b5),
(0x17b7, 0x17bd), (0x17c6, 0x17c6), (0x17c9, 0x17d3),
(0x17dd, 0x17dd), (0x180b, 0x180d), (0x18a9, 0x18a9),
(0x1920, 0x1922), (0x1927, 0x1928), (0x1932, 0x1932),
(0x1939, 0x193b), (0x1a17, 0x1a18), (0x1a60, 0x1a60),
(0x1a75, 0x1a7c), (0x1a7f, 0x1a7f), (0x1ab0, 0x1abd),
(0x1b00, 0x1b03), (0x1b34, 0x1b34), (0x1b36, 0x1b3a),
(0x1b3c, 0x1b3c), (0x1b42, 0x1b42), (0x1b44, 0x1b44),
(0x1b6b, 0x1b73), (0x1baa, 0x1bab), (0x1be6, 0x1be6),
(0x1bf2, 0x1bf3), (0x1c37, 0x1c37), (0x1cd0, 0x1cd2),
(0x1cd4, 0x1ce0), (0x1ce2, 0x1ce8), (0x1ced, 0x1ced),
(0x1cf4, 0x1cf4), (0x1cf8, 0x1cf9), (0x1dc0, 0x1df5),
(0x1dfb, 0x1dff), (0x200b, 0x200f), (0x202a, 0x202e),
(0x2060, 0x2063), (0x206a, 0x206f), (0x20d0, 0x20f0),
(0x2cef, 0x2cf1), (0x2d7f, 0x2d7f), (0x2de0, 0x2dff),
(0x302a, 0x302f), (0x3099, 0x309a), (0xa66f, 0xa66f),
(0xa674, 0xa67d), (0xa69e, 0xa69f), (0xa6f0, 0xa6f1),
(0xa806, 0xa806), (0xa80b, 0xa80b), (0xa825, 0xa826),
(0xa8c4, 0xa8c4), (0xa8e0, 0xa8f1), (0xa92b, 0xa92d),
(0xa953, 0xa953), (0xa9b3, 0xa9b3), (0xa9c0, 0xa9c0),
(0xaab0, 0xaab0), (0xaab2, 0xaab4), (0xaab7, 0xaab8),
(0xaabe, 0xaabf), (0xaac1, 0xaac1), (0xaaf6, 0xaaf6),
(0xabed, 0xabed), (0xfb1e, 0xfb1e), (0xfe00, 0xfe0f),
(0xfe20, 0xfe2f), (0xfeff, 0xfeff), (0xfff9, 0xfffb),
(0x101fd, 0x101fd), (0x102e0, 0x102e0), (0x10376, 0x1037a),
(0x10a01, 0x10a03), (0x10a05, 0x10a06), (0x10a0c, 0x10a0f),
(0x10a38, 0x10a3a), (0x10a3f, 0x10a3f), (0x10ae5, 0x10ae6),
(0x11046, 0x11046), (0x1107f, 0x1107f), (0x110b9, 0x110ba),
(0x11100, 0x11102), (0x11133, 0x11134), (0x11173, 0x11173),
(0x111c0, 0x111c0), (0x111ca, 0x111ca), (0x11235, 0x11236),
(0x112e9, 0x112ea), (0x1133c, 0x1133c), (0x1134d, 0x1134d),
(0x11366, 0x1136c), (0x11370, 0x11374), (0x11442, 0x11442),
(0x11446, 0x11446), (0x114c2, 0x114c3), (0x115bf, 0x115c0),
(0x1163f, 0x1163f), (0x116b6, 0x116b7), (0x1172b, 0x1172b),
(0x11c3f, 0x11c3f), (0x16af0, 0x16af4), (0x16b30, 0x16b36),
(0x1bc9e, 0x1bc9e), (0x1d165, 0x1d169), (0x1d16d, 0x1d182),
(0x1d185, 0x1d18b), (0x1d1aa, 0x1d1ad), (0x1d242, 0x1d244),
(0x1e000, 0x1e006), (0x1e008, 0x1e018), (0x1e01b, 0x1e021),
(0x1e023, 0x1e024), (0x1e026, 0x1e02a), (0x1e8d0, 0x1e8d6),
(0x1e944, 0x1e94a), (0xe0001, 0xe0001), (0xe0020, 0xe007f),
(0xe0100, 0xe01ef), )
'''
Internal table, provided by this module to list :term:`code points` which
combine with other characters and therefore should have no :term:`textual
width`. This is a sorted :class:`tuple` of non-overlapping intervals. Each
interval is a :class:`tuple` listing a starting :term:`code point` and ending
:term:`code point`. Every :term:`code point` between the two end points is
a combining character.
.. seealso::
:func:`~kitchen.text.display._generate_combining_table`
for how this table is generated
This table was last regenerated on python-3.6.0-rc1 with
:data:`unicodedata.unidata_version` 9.0.0
'''
# New function from Toshio Kuratomi (LGPLv2+)
def _generate_combining_table():
'''Combine Markus Kuhn's data with :mod:`unicodedata` to make combining
char list
:rtype: :class:`tuple` of tuples
:returns: :class:`tuple` of intervals of :term:`code points` that are
combining character. Each interval is a 2-:class:`tuple` of the
starting :term:`code point` and the ending :term:`code point` for the
combining characters.
In normal use, this function serves to tell how we're generating the
combining char list. For speed reasons, we use this to generate a static
list and just use that later.
Markus Kuhn's list of combining characters is more complete than what's in
the python :mod:`unicodedata` library but the python :mod:`unicodedata` is
synced against later versions of the unicode database
This is used to generate the :data:`~kitchen.text.display._COMBINING`
table.
'''
# Markus Kuhn's sorted list of non-overlapping intervals of non-spacing
# characters generated from Unicode 5.0 data by:
# "uniset +cat=Me +cat=Mn +cat=Cf -00AD +1160-11FF +200B c"
markus_kuhn_combining_5_0 = (
(0x0300, 0x036F), (0x0483, 0x0486), (0x0488, 0x0489),
(0x0591, 0x05BD), (0x05BF, 0x05BF), (0x05C1, 0x05C2),
(0x05C4, 0x05C5), (0x05C7, 0x05C7), (0x0600, 0x0603),
(0x0610, 0x0615), (0x064B, 0x065E), (0x0670, 0x0670),
(0x06D6, 0x06E4), (0x06E7, 0x06E8), (0x06EA, 0x06ED),
(0x070F, 0x070F), (0x0711, 0x0711), (0x0730, 0x074A),
(0x07A6, 0x07B0), (0x07EB, 0x07F3), (0x0901, 0x0902),
(0x093C, 0x093C), (0x0941, 0x0948), (0x094D, 0x094D),
(0x0951, 0x0954), (0x0962, 0x0963), (0x0981, 0x0981),
(0x09BC, 0x09BC), (0x09C1, 0x09C4), (0x09CD, 0x09CD),
(0x09E2, 0x09E3), (0x0A01, 0x0A02), (0x0A3C, 0x0A3C),
(0x0A41, 0x0A42), (0x0A47, 0x0A48), (0x0A4B, 0x0A4D),
(0x0A70, 0x0A71), (0x0A81, 0x0A82), (0x0ABC, 0x0ABC),
(0x0AC1, 0x0AC5), (0x0AC7, 0x0AC8), (0x0ACD, 0x0ACD),
(0x0AE2, 0x0AE3), (0x0B01, 0x0B01), (0x0B3C, 0x0B3C),
(0x0B3F, 0x0B3F), (0x0B41, 0x0B43), (0x0B4D, 0x0B4D),
(0x0B56, 0x0B56), (0x0B82, 0x0B82), (0x0BC0, 0x0BC0),
(0x0BCD, 0x0BCD), (0x0C3E, 0x0C40), (0x0C46, 0x0C48),
(0x0C4A, 0x0C4D), (0x0C55, 0x0C56), (0x0CBC, 0x0CBC),
(0x0CBF, 0x0CBF), (0x0CC6, 0x0CC6), (0x0CCC, 0x0CCD),
(0x0CE2, 0x0CE3), (0x0D41, 0x0D43), (0x0D4D, 0x0D4D),
(0x0DCA, 0x0DCA), (0x0DD2, 0x0DD4), (0x0DD6, 0x0DD6),
(0x0E31, 0x0E31), (0x0E34, 0x0E3A), (0x0E47, 0x0E4E),
(0x0EB1, 0x0EB1), (0x0EB4, 0x0EB9), (0x0EBB, 0x0EBC),
(0x0EC8, 0x0ECD), (0x0F18, 0x0F19), (0x0F35, 0x0F35),
(0x0F37, 0x0F37), (0x0F39, 0x0F39), (0x0F71, 0x0F7E),
(0x0F80, 0x0F84), (0x0F86, 0x0F87), (0x0F90, 0x0F97),
(0x0F99, 0x0FBC), (0x0FC6, 0x0FC6), (0x102D, 0x1030),
(0x1032, 0x1032), (0x1036, 0x1037), (0x1039, 0x1039),
(0x1058, 0x1059), (0x1160, 0x11FF), (0x135F, 0x135F),
(0x1712, 0x1714), (0x1732, 0x1734), (0x1752, 0x1753),
(0x1772, 0x1773), (0x17B4, 0x17B5), (0x17B7, 0x17BD),
(0x17C6, 0x17C6), (0x17C9, 0x17D3), (0x17DD, 0x17DD),
(0x180B, 0x180D), (0x18A9, 0x18A9), (0x1920, 0x1922),
(0x1927, 0x1928), (0x1932, 0x1932), (0x1939, 0x193B),
(0x1A17, 0x1A18), (0x1B00, 0x1B03), (0x1B34, 0x1B34),
(0x1B36, 0x1B3A), (0x1B3C, 0x1B3C), (0x1B42, 0x1B42),
(0x1B6B, 0x1B73), (0x1DC0, 0x1DCA), (0x1DFE, 0x1DFF),
(0x200B, 0x200F), (0x202A, 0x202E), (0x2060, 0x2063),
(0x206A, 0x206F), (0x20D0, 0x20EF), (0x302A, 0x302F),
(0x3099, 0x309A), (0xA806, 0xA806), (0xA80B, 0xA80B),
(0xA825, 0xA826), (0xFB1E, 0xFB1E), (0xFE00, 0xFE0F),
(0xFE20, 0xFE23), (0xFEFF, 0xFEFF), (0xFFF9, 0xFFFB),
(0x10A01, 0x10A03), (0x10A05, 0x10A06), (0x10A0C, 0x10A0F),
(0x10A38, 0x10A3A), (0x10A3F, 0x10A3F), (0x1D167, 0x1D169),
(0x1D173, 0x1D182), (0x1D185, 0x1D18B), (0x1D1AA, 0x1D1AD),
(0x1D242, 0x1D244), (0xE0001, 0xE0001), (0xE0020, 0xE007F),
(0xE0100, 0xE01EF))
combining = []
in_interval = False
interval = []
for codepoint in xrange(0, 0xFFFFF + 1):
if _interval_bisearch(codepoint, markus_kuhn_combining_5_0) or \
unicodedata.combining(unichr(codepoint)):
if not in_interval:
# Found first part of an interval
interval = [codepoint]
in_interval = True
else:
if in_interval:
in_interval = False
interval.append(codepoint - 1)
combining.append(interval)
if in_interval:
# If we're at the end and the interval is open, close it.
# :W0631: We looped through a static range so we know codepoint is
# defined here
#pylint:disable-msg=W0631
interval.append(codepoint)
combining.append(interval)
return tuple(itertools.imap(tuple, combining))
# New function from Toshio Kuratomi (LGPLv2+)
def _print_combining_table():
'''Print out a new :data:`_COMBINING` table
This will print a new :data:`_COMBINING` table in the format used in
:file:`kitchen/text/display.py`. It's useful for updating the
:data:`_COMBINING` table with updated data from a new python as the format
won't change from what's already in the file.
'''
table = _generate_combining_table()
entries = 0
print '_COMBINING = ('
for pair in table:
if entries >= 3:
entries = 0
print
if entries == 0:
print ' ',
entries += 1
entry = '(0x%x, 0x%x),' % pair
print entry,
print ')'
# Handling of control chars rewritten. Rest is JA's port of MK's C code.
# -Toshio Kuratomi
def _ucp_width(ucs, control_chars='guess'):
'''Get the :term:`textual width` of a ucs character
:arg ucs: integer representing a single unicode :term:`code point`
:kwarg control_chars: specify how to deal with :term:`control characters`.
Possible values are:
:guess: (default) will take a guess for :term:`control character`
widths. Most codes will return zero width. ``backspace``,
``delete``, and ``clear delete`` return -1. ``escape`` currently
returns -1 as well but this is not guaranteed as it's not always
correct
:strict: will raise :exc:`~kitchen.text.exceptions.ControlCharError`
if a :term:`control character` is encountered
:raises ControlCharError: if the :term:`code point` is a unicode
:term:`control character` and :attr:`control_chars` is set to 'strict'
:returns: :term:`textual width` of the character.
.. note::
It's important to remember this is :term:`textual width` and not the
number of characters or bytes.
'''
# test for 8-bit control characters
if ucs < 32 or (ucs < 0xa0 and ucs >= 0x7f):
# Control character detected
if control_chars == 'strict':
raise ControlCharError('_ucp_width does not understand how to'
' assign a width value to control characters.')
if ucs in (0x08, 0x07F, 0x94):
# Backspace, delete, and clear delete remove a single character
return -1
if ucs == 0x1b:
# Escape is tricky. It removes some number of characters that
# come after it, but the amount depends on what is
# interpreting the code.
# So this is often going to be wrong, but other values would be
# wrong as well.
return -1
# All other control characters get 0 width
return 0
if _interval_bisearch(ucs, _COMBINING):
# Combining characters return 0 width as they will be combined with
# the width from other characters
return 0
# if we arrive here, ucs is not a combining or C0/C1 control character
return (1 +
(ucs >= 0x1100 and
(ucs <= 0x115f or # Hangul Jamo init. consonants
ucs == 0x2329 or ucs == 0x232a or
(ucs >= 0x2e80 and ucs <= 0xa4cf and
ucs != 0x303f) or # CJK ... Yi
(ucs >= 0xac00 and ucs <= 0xd7a3) or # Hangul Syllables
(ucs >= 0xf900 and ucs <= 0xfaff) or # CJK Compatibility Ideographs
(ucs >= 0xfe10 and ucs <= 0xfe19) or # Vertical forms
(ucs >= 0xfe30 and ucs <= 0xfe6f) or # CJK Compatibility Forms
(ucs >= 0xff00 and ucs <= 0xff60) or # Fullwidth Forms
(ucs >= 0xffe0 and ucs <= 0xffe6) or
(ucs >= 0x20000 and ucs <= 0x2fffd) or
(ucs >= 0x30000 and ucs <= 0x3fffd))))
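# e.g. _ucp_width(ord(u'a')) == 1; _ucp_width(0x4e00) == 2 (CJK ideograph);
# _ucp_width(0x301) == 0 (combining accent); _ucp_width(0x08) == -1 (backspace)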
# Wholly rewritten by me (LGPLv2+) -Toshio Kuratomi
def textual_width(msg, control_chars='guess', encoding='utf-8',
errors='replace'):
'''Get the :term:`textual width` of a string
:arg msg: :class:`unicode` string or byte :class:`str` to get the width of
:kwarg control_chars: specify how to deal with :term:`control characters`.
Possible values are:
:guess: (default) will take a guess for :term:`control character`
widths. Most codes will return zero width. ``backspace``,
``delete``, and ``clear delete`` return -1. ``escape`` currently
returns -1 as well but this is not guaranteed as it's not always
correct
:strict: will raise :exc:`kitchen.text.exceptions.ControlCharError`
if a :term:`control character` is encountered
:kwarg encoding: If we are given a byte :class:`str` this is used to
decode it into :class:`unicode` string. Any characters that are not
decodable in this encoding will get a value dependent on the
:attr:`errors` parameter.
:kwarg errors: How to treat errors encoding the byte :class:`str` to
:class:`unicode` string. Legal values are the same as for
:func:`kitchen.text.converters.to_unicode`. The default value of
``replace`` will cause undecodable byte sequences to have a width of
one. ``ignore`` will have a width of zero.
:raises ControlCharError: if :attr:`msg` contains a :term:`control
character` and :attr:`control_chars` is ``strict``.
:returns: :term:`Textual width` of the :attr:`msg`. This is the amount of
space that the string will consume on a monospace display. It's
measured in the number of cell positions or columns it will take up on
a monospace display. This is **not** the number of glyphs that are in
the string.
.. note::
This function can be wrong sometimes because Unicode does not specify
a strict width value for all of the :term:`code points`. In
particular, we've found that some Tamil characters take up to four
character cells but we return a lesser amount.
'''
# On python 2.6.4, x86_64, I've benchmarked a few alternate
# implementations::
#
# timeit.repeat('display.textual_width(data)',
# 'from __main__ import display, data', number=100)
# I varied data by size and content (1MB of ascii, a few words, 43K utf8,
# unicode type
#
# :this implementation: fastest across the board
#
# :list comprehension: 6-16% slower
# return sum([_ucp_width(ord(c), control_chars=control_chars)
# for c in msg])
#
# :generator expression: 9-18% slower
# return sum((_ucp_width(ord(c), control_chars=control_chars) for c in
# msg))
#
# :lambda: 10-19% slower
# return sum(itertools.imap(lambda x: _ucp_width(ord(x), control_chars),
# msg))
#
# :partial application: 13-22% slower
# func = functools.partial(_ucp_width, control_chars=control_chars)
# return sum(itertools.imap(func, itertools.imap(ord, msg)))
#
# :the original code: 4-38% slower
# The 4% was for the short, ascii only string. All the other pieces of
# data yielded over 30% slower times.
# Non decodable data is just assigned a single cell width
msg = to_unicode(msg, encoding=encoding, errors=errors)
# Add the width of each char
return sum(
# calculate width of each char
itertools.starmap(_ucp_width,
# Setup the arguments to _ucp_width
itertools.izip(
# int value of each char
itertools.imap(ord, msg),
# control_chars arg in a form that izip will deal with
itertools.repeat(control_chars))))
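# e.g. textual_width(u'abc') == 3, while textual_width(u'一二三') == 6:
# each CJK ideograph occupies two cells on a monospace display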
# Wholly rewritten by me -Toshio Kuratomi
def textual_width_chop(msg, chop, encoding='utf-8', errors='replace'):
'''Given a string, return it chopped to a given :term:`textual width`
:arg msg: :class:`unicode` string or byte :class:`str` to chop
:arg chop: Chop :attr:`msg` if it exceeds this :term:`textual width`
:kwarg encoding: If we are given a byte :class:`str`, this is used to
decode it into a :class:`unicode` string. Any characters that are not
decodable in this encoding will be assigned a width of one.
:kwarg errors: How to treat errors encoding the byte :class:`str` to
:class:`unicode`. Legal values are the same as for
:func:`kitchen.text.converters.to_unicode`
:rtype: :class:`unicode` string
:returns: :class:`unicode` string of the :attr:`msg` chopped at the given
:term:`textual width`
This is what you want to use instead of ``%.*s``, as it does the "right"
thing with regard to :term:`UTF-8` sequences, :term:`control characters`,
and characters that take more than one cell position. Eg::
>>> # Wrong: only displays 8 characters because it is operating on bytes
>>> print "%.*s" % (10, 'café ñunru!')
café ñun
>>> # Properly operates on graphemes
>>> '%s' % (textual_width_chop('café ñunru!', 10))
café ñunru
>>> # takes too many columns because the kanji need two cell positions
>>> print '1234567890\\n%.*s' % (10, u'一二三四五六七八九十')
1234567890
一二三四五六七八九十
>>> # Properly chops at 10 columns
>>> print '1234567890\\n%s' % (textual_width_chop(u'一二三四五六七八九十', 10))
1234567890
一二三四五
'''
msg = to_unicode(msg, encoding=encoding, errors=errors)
width = textual_width(msg)
if width <= chop:
return msg
maximum = len(msg)
if maximum > chop * 2:
# A character can take at most 2 cell positions so this is the actual
# maximum
maximum = chop * 2
minimum = 0
eos = maximum
if eos > chop:
eos = chop
width = textual_width(msg[:eos])
while True:
# if current width is high,
if width > chop:
# calculate new midpoint
mid = minimum + (eos - minimum) // 2
if mid == eos:
break
if (eos - chop) < (eos - mid):
while width > chop:
width = width - _ucp_width(ord(msg[eos-1]))
eos -= 1
return msg[:eos]
# subtract distance between eos and mid from width
width = width - textual_width(msg[mid:eos])
maximum = eos
eos = mid
# if current width is low,
elif width < chop:
# Note: at present, the if (eos - chop) < (eos - mid):
# short-circuit above means that we never use this branch.
# calculate new midpoint
mid = eos + (maximum - eos) // 2
if mid == eos:
break
if (chop - eos) < (mid - eos):
while width < chop:
new_width = _ucp_width(ord(msg[eos]))
width = width + new_width
eos += 1
return msg[:eos]
# add distance between eos and new mid to width
width = width + textual_width(msg[eos:mid])
minimum = eos
eos = mid
if eos > maximum:
eos = maximum
break
# if current is just right
else:
return msg[:eos]
return msg[:eos]
# I made some adjustments for using unicode but largely unchanged from JA's
# port of MK's code -Toshio
def textual_width_fill(msg, fill, chop=None, left=True, prefix='', suffix=''):
'''Expand a :class:`unicode` string to a specified :term:`textual width`
or chop to same
:arg msg: :class:`unicode` string to format
:arg fill: pad string until the :term:`textual width` of the string is
this length
:kwarg chop: before doing anything else, chop the string to this length.
Default: Don't chop the string at all
:kwarg left: If :data:`True` (default) left justify the string and put the
padding on the right. If :data:`False`, pad on the left side.
:kwarg prefix: Attach this string before the field we're filling
:kwarg suffix: Append this string to the end of the field we're filling
:rtype: :class:`unicode` string
:returns: :attr:`msg` formatted to fill the specified width. If no
:attr:`chop` is specified, the string could exceed the fill length
when completed. If :attr:`prefix` or :attr:`suffix` are printable
characters, the string could be longer than the fill width.
.. note::
:attr:`prefix` and :attr:`suffix` should be used for "invisible"
characters like highlighting, color changing escape codes, etc. The
fill characters are appended outside of any :attr:`prefix` or
:attr:`suffix` elements. This allows you to only highlight
:attr:`msg` inside of the field you're filling.
.. warning::
:attr:`msg`, :attr:`prefix`, and :attr:`suffix` should all be
representable as unicode characters. In particular, any escape
sequences in :attr:`prefix` and :attr:`suffix` need to be convertible
to :class:`unicode`. If you need to use byte sequences here rather
than unicode characters, use
:func:`~kitchen.text.display.byte_string_textual_width_fill` instead.
This function expands a string to fill a field of a particular
:term:`textual width`. Use it instead of ``%*.*s``, as it does the
"right" thing with regard to :term:`UTF-8` sequences, :term:`control
characters`, and characters that take more than one cell position in
a display. Example usage::
>>> msg = u'一二三四五六七八九十'
>>> # Wrong: This uses 10 characters instead of 10 cells:
>>> u":%-*.*s:" % (10, 10, msg[:9])
:一二三四五六七八九 :
>>> # This uses 10 cells like we really want:
>>> u":%s:" % (textual_width_fill(msg[:9], 10, 10))
:一二三四五:
>>> # Wrong: Right aligned in the field, but too many cells
>>> u"%20.10s" % (msg)
一二三四五六七八九十
>>> # Correct: Right aligned with proper number of cells
>>> u"%s" % (textual_width_fill(msg, 20, 10, left=False))
一二三四五
>>> # Wrong: Adding some escape characters to highlight the line but too many cells
>>> u"%s%20.10s%s" % (prefix, msg, suffix)
u'\x1b[7m 一二三四五六七八九十\x1b[0m'
>>> # Correct highlight of the line
>>> u"%s%s%s" % (prefix, display.textual_width_fill(msg, 20, 10, left=False), suffix)
u'\x1b[7m 一二三四五\x1b[0m'
>>> # Correct way to not highlight the fill
>>> u"%s" % (display.textual_width_fill(msg, 20, 10, left=False, prefix=prefix, suffix=suffix))
u' \x1b[7m一二三四五\x1b[0m'
'''
msg = to_unicode(msg)
if chop is not None:
msg = textual_width_chop(msg, chop)
width = textual_width(msg)
if width >= fill:
if prefix or suffix:
msg = u''.join([prefix, msg, suffix])
else:
extra = u' ' * (fill - width)
if left:
msg = u''.join([prefix, msg, suffix, extra])
else:
msg = u''.join([extra, prefix, msg, suffix])
return msg
def _textual_width_le(width, *args):
'''Optimize the common case when deciding which :term:`textual width` is
larger
:arg width: :term:`textual width` to compare against.
:arg \*args: :class:`unicode` strings to check the total :term:`textual
width` of
:returns: :data:`True` if the total length of :attr:`args` are less than
or equal to :attr:`width`. Otherwise :data:`False`.
We often want to know "does X fit in Y". It takes a while to use
    :func:`textual_width` to calculate this. However, we know that each
    canonically composed :class:`unicode` character has a :term:`textual
    width` of 1 or 2 cells. With this we can take the following shortcuts:
1) If the number of canonically composed characters is more than width,
the true :term:`textual width` cannot be less than width.
2) If the number of canonically composed characters * 2 is less than the
width then the :term:`textual width` must be ok.
    The :term:`textual width` of a canonically composed :class:`unicode`
    string is always at least the number of :class:`unicode` characters and
    at most twice that number. So if the character count exceeds
    :attr:`width` we can return :data:`False` immediately, and if twice the
    character count fits within :attr:`width` we can return :data:`True`
    immediately. Only the remaining cases need a full :term:`textual width`
    lookup.
'''
string = ''.join(args)
string = unicodedata.normalize('NFC', string)
if len(string) > width:
return False
elif len(string) * 2 <= width:
return True
elif len(to_bytes(string)) <= width:
# Check against bytes.
# utf8 has the property of having the same amount or more bytes per
# character than textual width.
return True
else:
true_width = textual_width(string)
return true_width <= width
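# Illustrative sketch (not part of the original kitchen API; the helper name
# _demo_textual_width_le is hypothetical): exercises the shortcuts described
# in the docstring above.
def _demo_textual_width_le():
    # Shortcut 1: five composed characters can never fit in four cells
    assert not _textual_width_le(4, u'abcde')
    # Shortcut 2: three characters occupy at most six cells, so ten is safe
    assert _textual_width_le(10, u'abc')
    # Neither shortcut applies; falls back to a full textual_width() check
    assert not _textual_width_le(3, u'\u4e00\u4e8c')  # two double-width cells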
def wrap(text, width=70, initial_indent=u'', subsequent_indent=u'',
encoding='utf-8', errors='replace'):
    '''Works like we want :func:`textwrap.wrap` to work.
:arg text: :class:`unicode` string or byte :class:`str` to wrap
:kwarg width: :term:`textual width` at which to wrap. Default: 70
:kwarg initial_indent: string to use to indent the first line. Default:
do not indent.
:kwarg subsequent_indent: string to use to wrap subsequent lines.
Default: do not indent
:kwarg encoding: Encoding to use if :attr:`text` is a byte :class:`str`
:kwarg errors: error handler to use if :attr:`text` is a byte :class:`str`
and contains some undecodable characters.
:rtype: :class:`list` of :class:`unicode` strings
:returns: list of lines that have been text wrapped and indented.
:func:`textwrap.wrap` from the |stdlib|_ has two drawbacks that this
attempts to fix:
1. It does not handle :term:`textual width`. It only operates on bytes or
characters which are both inadequate (due to multi-byte and double
width characters).
2. It malforms lists and blocks.
'''
# Tested with:
# yum info robodoc gpicview php-pear-Net-Socket wmctrl ustr moreutils
# mediawiki-HNP ocspd insight yum mousepad
# ...at 120, 80 and 40 chars.
# Also, notable among lots of others, searching for "\n ":
# exim-clamav, jpackage-utils, tcldom, synaptics, "quake3",
# perl-Class-Container, ez-ipupdate, perl-Net-XMPP, "kipi-plugins",
# perl-Apache-DBI, netcdf, python-configobj, "translate-toolkit", alpine,
# "udunits", "conntrack-tools"
#
# Note that, we "fail" on:
# alsa-plugins-jack, setools*, dblatex, uisp, "perl-Getopt-GUI-Long",
# suitesparse, "synce-serial", writer2latex, xenwatch, ltsp-utils
def _indent_at_beg(line):
'''Return the indent to use for this and (possibly) subsequent lines
:arg line: :class:`unicode` line of text to process
:rtype: tuple
:returns: tuple of count of whitespace before getting to the start of
this line followed by a count to the following indent if this
block of text is an entry in a list.
'''
# Find the first non-whitespace character
try:
char = line.strip()[0]
except IndexError:
# All whitespace
return 0, 0
else:
count = line.find(char)
# if we have a bullet character, check for list
if char not in u'-*.o\u2022\u2023\u2218':
# No bullet; not a list
return count, 0
# List: Keep searching until we hit the innermost list
nxt = _indent_at_beg(line[count+1:])
nxt = nxt[1] or nxt[0]
if nxt:
return count, count + 1 + nxt
return count, 0
initial_indent = to_unicode(initial_indent, encoding=encoding,
errors=errors)
subsequent_indent = to_unicode(subsequent_indent, encoding=encoding,
errors=errors)
subsequent_indent_width = textual_width(subsequent_indent)
text = to_unicode(text, encoding=encoding, errors=errors).rstrip(u'\n')
lines = text.expandtabs().split(u'\n')
ret = []
indent = initial_indent
wrap_last = False
cur_sab = 0
cur_spc_indent = 0
for line in lines:
line = line.rstrip(u' ')
(last_sab, last_spc_indent) = (cur_sab, cur_spc_indent)
(cur_sab, cur_spc_indent) = _indent_at_beg(line)
force_nl = False # We want to stop wrapping under "certain" conditions:
if wrap_last and cur_spc_indent: # if line starts a list or
force_nl = True
        if wrap_last and cur_sab == len(line):  # is empty line
force_nl = True
if wrap_last and not last_spc_indent: # if we don't continue a list
if cur_sab >= 4 and cur_sab != last_sab: # and is "block indented"
force_nl = True
if force_nl:
ret.append(indent.rstrip(u' '))
indent = subsequent_indent
wrap_last = False
if cur_sab == len(line): # empty line, remove spaces to make it easier.
line = u''
if wrap_last:
line = line.lstrip(u' ')
cur_spc_indent = last_spc_indent
if _textual_width_le(width, indent, line):
wrap_last = False
ret.append(indent + line)
indent = subsequent_indent
continue
wrap_last = True
words = line.split(u' ')
line = indent
spcs = cur_spc_indent
if not spcs and cur_sab >= 4:
spcs = cur_sab
for word in words:
if (not _textual_width_le(width, line, word) and
textual_width(line) > subsequent_indent_width):
ret.append(line.rstrip(u' '))
line = subsequent_indent + u' ' * spcs
line += word
line += u' '
indent = line.rstrip(u' ') + u' '
if wrap_last:
ret.append(indent.rstrip(u' '))
return ret
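# Minimal usage sketch for wrap() (hypothetical demo, not part of kitchen):
# lines break on textual width, and continuation lines pick up the
# subsequent_indent prefix.
def _demo_wrap():
    assert wrap(u'The quick brown fox jumps over the lazy dog', width=20) == \
        [u'The quick brown fox', u'jumps over the lazy', u'dog']
    assert wrap(u'one two three', width=10, subsequent_indent=u'  ') == \
        [u'one two', u'  three']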
def fill(text, *args, **kwargs):
'''Works like we want :func:`textwrap.fill` to work
:arg text: :class:`unicode` string or byte :class:`str` to process
:returns: :class:`unicode` string with each line separated by a newline
.. seealso::
:func:`kitchen.text.display.wrap`
for other parameters that you can give this command.
This function is a light wrapper around :func:`kitchen.text.display.wrap`.
Where that function returns a :class:`list` of lines, this function
returns one string with each line separated by a newline.
'''
return u'\n'.join(wrap(text, *args, **kwargs))
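# Minimal usage sketch for fill() (hypothetical demo, not part of kitchen):
# it is exactly u'\n'.join of the lines wrap() produces.
def _demo_fill():
    assert fill(u'one two three', width=10, subsequent_indent=u'  ') == \
        u'one two\n  three'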
#
# Byte strings
#
def byte_string_textual_width_fill(msg, fill, chop=None, left=True, prefix='',
suffix='', encoding='utf-8', errors='replace'):
'''Expand a byte :class:`str` to a specified :term:`textual width` or chop
to same
:arg msg: byte :class:`str` encoded in :term:`UTF-8` that we want formatted
:arg fill: pad :attr:`msg` until the :term:`textual width` is this long
:kwarg chop: before doing anything else, chop the string to this length.
Default: Don't chop the string at all
:kwarg left: If :data:`True` (default) left justify the string and put the
padding on the right. If :data:`False`, pad on the left side.
:kwarg prefix: Attach this byte :class:`str` before the field we're
filling
:kwarg suffix: Append this byte :class:`str` to the end of the field we're
filling
:rtype: byte :class:`str`
:returns: :attr:`msg` formatted to fill the specified :term:`textual
width`. If no :attr:`chop` is specified, the string could exceed the
fill length when completed. If :attr:`prefix` or :attr:`suffix` are
        printable characters, the string could be longer than the fill width.
.. note::
:attr:`prefix` and :attr:`suffix` should be used for "invisible"
characters like highlighting, color changing escape codes, etc. The
fill characters are appended outside of any :attr:`prefix` or
:attr:`suffix` elements. This allows you to only highlight
:attr:`msg` inside of the field you're filling.
.. seealso::
:func:`~kitchen.text.display.textual_width_fill`
For example usage. This function has only two differences.
1. it takes byte :class:`str` for :attr:`prefix` and
:attr:`suffix` so you can pass in arbitrary sequences of
bytes, not just unicode characters.
2. it returns a byte :class:`str` instead of a :class:`unicode`
string.
'''
prefix = to_bytes(prefix, encoding=encoding, errors=errors)
suffix = to_bytes(suffix, encoding=encoding, errors=errors)
if chop is not None:
msg = textual_width_chop(msg, chop, encoding=encoding, errors=errors)
width = textual_width(msg)
msg = to_bytes(msg)
if width >= fill:
if prefix or suffix:
msg = ''.join([prefix, msg, suffix])
else:
extra = ' ' * (fill - width)
if left:
msg = ''.join([prefix, msg, suffix, extra])
else:
msg = ''.join([extra, prefix, msg, suffix])
return msg
__all__ = ('byte_string_textual_width_fill', 'fill', 'textual_width',
'textual_width_chop', 'textual_width_fill', 'wrap')
|
fedora-infra/kitchen
|
kitchen2/kitchen/text/display.py
|
Python
|
lgpl-2.1
| 40,137
|
[
"NetCDF"
] |
ed22fe11db84bedca4511113813d2b6fa947abb978d0efb68b3c0a9acf6c6c8d
|
class ASTVisitor():
def visit(self, astnode):
'A read-only function which looks at a single AST node.'
pass
def return_value(self):
return None
class ASTModVisitor(ASTVisitor):
'''A visitor class that can also construct a new, modified AST.
Two methods are offered: the normal visit() method, which focuses on analyzing
and/or modifying a single node; and the post_visit() method, which allows you
to modify the child list of a node.
    The default implementation does nothing; it simply rebuilds the
    original AST, unmodified.'''
def visit(self, astnode):
# Note that this overrides the super's implementation, because we need a
# non-None return value.
return astnode
def post_visit(self, visit_value, child_values):
'''A function which constructs a return value out of its children.
This can be used to modify an AST by returning a different or modified
ASTNode than the original. The top-level return value will then be the
new AST.'''
return visit_value
class ASTNode(object):
def __init__(self):
self.parent = None
self._children = []
@property
def children(self):
return self._children
@children.setter
def children(self, children):
self._children = children
for child in children:
child.parent = self
    def pprint(self, indent=''):
        """
        Recursively prints a formatted string representation of the AST.
        Parameters
        ----------
        indent : str, optional
            string prepended to each printed line; child nodes are printed
            with additional indentation
        """
print(indent + self.__class__.__name__)
indent = indent + ' '
for child in self._children:
child.pprint(indent)
def walk(self, visitor):
"""
Traverses an AST, calling visitor.visit() on every node.
This is a depth-first, pre-order traversal. Parents will be visited before
any children, children will be visited in order, and (by extension) a node's
children will all be visited before its siblings.
The visitor may modify attributes, but may not add or delete nodes.
Parameters
----------
visitor : ASTVisitor
visitor for a single AST node
"""
visitor.visit(self)
for child in self.children:
child.walk(visitor)
return visitor.return_value()
def mod_walk(self, mod_visitor):
        '''Traverses an AST, building up a return value from visitor methods.
        Similar to walk(), but constructs a return value from the result of
        post_visit() calls. This can be used to modify an AST by building up
        the desired new AST with return values.'''
selfval = mod_visitor.visit(self)
child_values = [child.mod_walk(mod_visitor) for child in self.children]
        retval = mod_visitor.post_visit(selfval, child_values)
return retval
class ASTProgram(ASTNode):
def __init__(self, statements):
super().__init__()
self.children = statements
class ASTImport(ASTNode):
def __init__(self, mod):
super().__init__()
self.mod = mod
@property
def module(self):
return self.mod
class ASTComponent(ASTNode):
def __init__(self, name, expressions):
"""
Initialize an ASTComponent node with name and expressions
Parameters
----------
        name : str
            name of the component; it is wrapped in an ASTID node and
            stored as the first child
        expressions : list
            list of AST expression nodes
"""
super().__init__()
        expressions.insert(0, ASTID(name))
        self.children = expressions
@property
def name(self):
return self.children[0]
@property
def expressions(self):
return self.children[1:]
class ASTInputExpr(ASTNode):
def __init__(self, declarations):
super().__init__()
        self.children = declarations
class ASTOutputExpr(ASTNode):
def __init__(self, declarations):
super().__init__()
        self.children = declarations
class ASTAssignmentExpr(ASTNode):
def __init__(self, binding, value):
"""
Initialize an ASTAssignmentExpr node with binding and value
Parameters
----------
        binding : str
            name being bound; it is wrapped in an ASTID node and stored as
            the first child
        value : ASTID or ASTLiteral
            node representing the bound value
"""
super().__init__()
        self.children = [ASTID(binding), value]
@property
def binding(self):
return self.children[0]
@property
def value(self):
        return self.children[1]
class ASTEvalExpr(ASTNode):
def __init__(self, op, args):
"""
Initialize an ASTEvalExpr node with op and args
Parameters
----------
op : ASTID
ASTID node represents the operation
        args : list of ASTID or ASTLiteral
            nodes representing the arguments
"""
super().__init__()
        # Assign through the property setter so each child's parent is set
        self.children = [op] + list(args)
@property
def op(self):
return self.children[0]
@property
def args(self):
return self.children[1:]
# These are already complete.
class ASTID(ASTNode):
def __init__(self, name, typedecl=None):
"""
Initialize an ASTID node with name and type declaration
Parameters
----------
name : string
name of ASTID node
typedecl : str
data type of ASTID node
"""
super().__init__()
self.name = name
self.type = typedecl
class ASTLiteral(ASTNode):
def __init__(self, value):
"""
Initialize an ASTLiteral node with value
Parameters
----------
value : number or string
value of ASTLiteral node
"""
super().__init__()
self.value = value
self.type = 'Scalar'
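# Illustrative sketch (not part of the original pype module; NodeCounter and
# _demo_ast_walk are hypothetical names): builds a tiny AST and walks it with
# a node-counting visitor.
class NodeCounter(ASTVisitor):
    def __init__(self):
        self.count = 0
    def visit(self, astnode):
        self.count += 1
    def return_value(self):
        return self.count
def _demo_ast_walk():
    prog = ASTProgram([ASTAssignmentExpr('x', ASTLiteral(1))])
    # ASTProgram -> ASTAssignmentExpr -> (ASTID, ASTLiteral): four nodes
    assert prog.walk(NodeCounter()) == 4
    # The default ASTModVisitor rebuilds the tree unchanged
    assert prog.mod_walk(ASTModVisitor()) is prog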
|
cs207-project/TimeSeries
|
pype/ast.py
|
Python
|
mit
| 5,420
|
[
"VisIt"
] |
dc5ed9baa6f651f760ab74dfd5e17c23d262545ef5d62420ac48f7a69c41675e
|
#! /usr/bin/env python
"""Usage: AsapFileToTrajectory.py oldfile newfile [frame1 [frame2]]
Converts an Asap version 1.x netcdf file to an ASE version 2
trajectory file. If a single frame number is given, only that frame
is converted. If two frame numbers are given, all frames from the
first to the last are converted (-1 means the last frame). If no
frame numbers are given, all frames are converted.
"""
import Numeric as num
from Scientific.IO.NetCDF import NetCDFFile
import sys
import types
old_names = {
'cartesianPositions': 'CartesianPositions',
'cartesianMomenta': 'CartesianMomenta',
'basisVectors': 'UnitCell',
'classes': 'Tags',
'atomicNumbers': 'AtomicNumbers',
'periodic': 'BoundaryConditions'
}
new_names = {
# name shape typecode once units
# -----------------------------------------------------------------
'CartesianVelocities': (('natoms', 3), num.Float, False, (2, -0.5)),
'CartesianPositions': (('natoms', 3), num.Float, False, (1, 0)),
'CartesianMomenta': (('natoms', 3), num.Float, False, (2, -0.5)),
'CartesianForces': (('natoms', 3), num.Float, False, (-1, 1)),
'Stress': ((3, 3), num.Float, False, (-3, 1)),
'UnitCell': ((3, 3), num.Float, False, (1, 0)),
'BoundaryConditions': ((3,), num.Int, True , (0, 0)),
'PotentialEnergy': ((), num.Float, False, (0, 1)),
'AtomicNumbers': (('natoms',), num.Int, True, (0, 0)),
'MagneticMoments': (('natoms',), num.Float, True, (0, 0)),
'Tags': (('natoms',), num.Int, True, (0, 0))}
def normalize(fr, nfr, default):
    if fr is None:
fr = default
if fr < 0:
fr = nfr + fr
if fr < 0:
raise ValueError, "Frame number before beginning of file."
if fr > nfr:
raise ValueError, "Frame number after end of file: " + str(fr) + " > " + str(nfr)
return fr
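# Hypothetical helper (not in the original script) illustrating normalize():
# with 10 frames, None selects the default and negative numbers count back
# from the end of the file.
def _demo_normalize(nframes=10):
    assert normalize(None, nframes, 0) == 0
    assert normalize(-1, nframes, 0) == nframes - 1
    assert normalize(3, nframes, 0) == 3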
def AsapFileToTrajectory(oldfile, newfile, firstframe=None, lastframe=None):
# Check if input file is a filename or a NetCDF file
if isinstance(oldfile, types.StringTypes):
oldfile = NetCDFFile(oldfile)
pos = oldfile.variables['cartesianPositions'] # Must be present
(nframes, natoms, three) = pos.shape
print natoms, three, nframes
firstframe = normalize(firstframe, nframes, 0)
lastframe = normalize(lastframe, nframes, -1)
if lastframe < firstframe:
raise ValueError, "No frames to copy, giving up."
print "Preparing to copy frames", firstframe, "to", lastframe
# Now open the output file, and define the variables.
if isinstance(newfile, types.StringTypes):
newfile = NetCDFFile(newfile, "w")
oncevars = []
manyvars = []
for v in oldfile.variables.keys():
try:
newname = old_names[v]
except KeyError:
print "WARNING: Skipping data named", v
continue
if new_names[newname][2]:
shape = new_names[newname][0]
oncevars.append((v, newname))
else:
shape = ("unlim",) + new_names[newname][0]
manyvars.append((v, newname))
shape2 = []
for d in shape:
if isinstance(d, types.IntType):
n = d
d = str(d)
elif d == 'natoms':
n = natoms
elif d == 'unlim':
n = None
else:
raise RuntimeError, "Unknown dimension "+str(d)
if not newfile.dimensions.has_key(d):
newfile.createDimension(d, n)
shape2.append(d)
print v, "-->", newname, " shape", shape2
var = newfile.createVariable(newname, oldfile.variables[v].typecode(),
tuple(shape2))
var.once = new_names[newname][2]
var.units = new_names[newname][3]
# Now copy the data
print "Copying global data"
newfile.history = 'ASE trajectory'
newfile.version = '0.1'
newfile.lengthunit = 'Ang'
newfile.energyunit = 'eV'
for oldname, newname in oncevars:
newfile.variables[newname][:] = oldfile.variables[oldname][:]
for n in range(firstframe, lastframe+1):
print "Copying frame", n
for oldname, newname in manyvars:
newfile.variables[newname][n] = oldfile.variables[oldname][n]
newfile.close()
if __name__ == "__main__":
# sys.tracebacklimit = 0
if len(sys.argv) < 3 or len(sys.argv) > 5:
print __doc__
raise TypeError, "Wrong number of arguments."
infile = sys.argv[1]
outfile = sys.argv[2]
first = last = None
if len(sys.argv) > 3:
first = last = int(sys.argv[3])
if len(sys.argv) > 4:
last = int(sys.argv[4])
AsapFileToTrajectory(infile, outfile, first, last)
|
auag92/n2dm
|
Asap-3.8.4/Python/asap3/Tools/AsapFileToTrajectory.py
|
Python
|
mit
| 4,899
|
[
"ASE",
"NetCDF"
] |
7db528cf68e4c0b0328e187e0ced91aa0b06983cb301f5ac39b378d9ef17fdbc
|
#
# Author: Pearu Peterson, March 2002
#
# w/ additions by Travis Oliphant, March 2002
# and Jake Vanderplas, August 2012
from warnings import warn
import numpy as np
from numpy import atleast_1d, atleast_2d
from .flinalg import get_flinalg_funcs
from .lapack import get_lapack_funcs, _compute_lwork
from .misc import LinAlgError, _datacopied, LinAlgWarning
from .decomp import _asarray_validated
from . import decomp, decomp_svd
from ._solve_toeplitz import levinson
__all__ = ['solve', 'solve_triangular', 'solveh_banded', 'solve_banded',
'solve_toeplitz', 'solve_circulant', 'inv', 'det', 'lstsq',
'pinv', 'pinv2', 'pinvh', 'matrix_balance', 'matmul_toeplitz']
# Linear equations
def _solve_check(n, info, lamch=None, rcond=None):
""" Check arguments during the different steps of the solution phase """
if info < 0:
raise ValueError('LAPACK reported an illegal value in {}-th argument'
'.'.format(-info))
elif 0 < info:
raise LinAlgError('Matrix is singular.')
if lamch is None:
return
E = lamch('E')
if rcond < E:
warn('Ill-conditioned matrix (rcond={:.6g}): '
'result may not be accurate.'.format(rcond),
LinAlgWarning, stacklevel=3)
def solve(a, b, sym_pos=False, lower=False, overwrite_a=False,
overwrite_b=False, debug=None, check_finite=True, assume_a='gen',
transposed=False):
"""
Solves the linear equation set ``a * x = b`` for the unknown ``x``
for square ``a`` matrix.
If the data matrix is known to be a particular type then supplying the
corresponding string to ``assume_a`` key chooses the dedicated solver.
The available options are
=================== ========
generic matrix 'gen'
symmetric 'sym'
hermitian 'her'
positive definite 'pos'
=================== ========
If omitted, ``'gen'`` is the default structure.
The datatype of the arrays define which solver is called regardless
of the values. In other words, even when the complex array entries have
precisely zero imaginary parts, the complex solver will be called based
on the data type of the array.
Parameters
----------
a : (N, N) array_like
Square input data
b : (N, NRHS) array_like
Input data for the right hand side.
sym_pos : bool, optional
Assume `a` is symmetric and positive definite. This key is deprecated
and assume_a = 'pos' keyword is recommended instead. The functionality
is the same. It will be removed in the future.
lower : bool, optional
        If True, use only the data contained in the lower triangle of `a`.
        Default is to use the upper triangle. (ignored for ``'gen'``)
overwrite_a : bool, optional
Allow overwriting data in `a` (may enhance performance).
Default is False.
overwrite_b : bool, optional
Allow overwriting data in `b` (may enhance performance).
Default is False.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
assume_a : str, optional
Valid entries are explained above.
transposed: bool, optional
        If True, solve ``a^T x = b`` for real matrices. Raises
        `NotImplementedError` for complex matrices.
Returns
-------
x : (N, NRHS) ndarray
The solution array.
Raises
------
ValueError
If size mismatches detected or input a is not square.
LinAlgError
If the matrix is singular.
LinAlgWarning
If an ill-conditioned input a is detected.
NotImplementedError
If transposed is True and input a is a complex matrix.
Examples
--------
Given `a` and `b`, solve for `x`:
>>> a = np.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]])
>>> b = np.array([2, 4, -1])
>>> from scipy import linalg
>>> x = linalg.solve(a, b)
>>> x
array([ 2., -2., 9.])
>>> np.dot(a, x) == b
array([ True, True, True], dtype=bool)
Notes
-----
If the input b matrix is a 1-D array with N elements, when supplied
together with an NxN input a, it is assumed as a valid column vector
despite the apparent size mismatch. This is compatible with the
numpy.dot() behavior and the returned result is still 1-D array.
The generic, symmetric, Hermitian and positive definite solutions are
obtained via calling ?GESV, ?SYSV, ?HESV, and ?POSV routines of
LAPACK respectively.
"""
# Flags for 1-D or N-D right-hand side
b_is_1D = False
a1 = atleast_2d(_asarray_validated(a, check_finite=check_finite))
b1 = atleast_1d(_asarray_validated(b, check_finite=check_finite))
n = a1.shape[0]
overwrite_a = overwrite_a or _datacopied(a1, a)
overwrite_b = overwrite_b or _datacopied(b1, b)
if a1.shape[0] != a1.shape[1]:
raise ValueError('Input a needs to be a square matrix.')
if n != b1.shape[0]:
# Last chance to catch 1x1 scalar a and 1-D b arrays
if not (n == 1 and b1.size != 0):
raise ValueError('Input b has to have same number of rows as '
'input a')
# accommodate empty arrays
if b1.size == 0:
return np.asfortranarray(b1.copy())
# regularize 1-D b arrays to 2D
if b1.ndim == 1:
if n == 1:
b1 = b1[None, :]
else:
b1 = b1[:, None]
b_is_1D = True
# Backwards compatibility - old keyword.
if sym_pos:
assume_a = 'pos'
if assume_a not in ('gen', 'sym', 'her', 'pos'):
raise ValueError('{} is not a recognized matrix structure'
''.format(assume_a))
# Deprecate keyword "debug"
if debug is not None:
warn('Use of the "debug" keyword is deprecated '
'and this keyword will be removed in future '
'versions of SciPy.', DeprecationWarning, stacklevel=2)
# Get the correct lamch function.
# The LAMCH functions only exists for S and D
# So for complex values we have to convert to real/double.
if a1.dtype.char in 'fF': # single precision
lamch = get_lapack_funcs('lamch', dtype='f')
else:
lamch = get_lapack_funcs('lamch', dtype='d')
# Currently we do not have the other forms of the norm calculators
# lansy, lanpo, lanhe.
# However, in any case they only reduce computations slightly...
lange = get_lapack_funcs('lange', (a1,))
# Since the I-norm and 1-norm are the same for symmetric matrices
# we can collect them all in this one call
# Note however, that when issuing 'gen' and form!='none', then
# the I-norm should be used
if transposed:
trans = 1
norm = 'I'
if np.iscomplexobj(a1):
raise NotImplementedError('scipy.linalg.solve can currently '
'not solve a^T x = b or a^H x = b '
'for complex matrices.')
else:
trans = 0
norm = '1'
anorm = lange(norm, a1)
# Generalized case 'gesv'
if assume_a == 'gen':
gecon, getrf, getrs = get_lapack_funcs(('gecon', 'getrf', 'getrs'),
(a1, b1))
lu, ipvt, info = getrf(a1, overwrite_a=overwrite_a)
_solve_check(n, info)
x, info = getrs(lu, ipvt, b1,
trans=trans, overwrite_b=overwrite_b)
_solve_check(n, info)
rcond, info = gecon(lu, anorm, norm=norm)
# Hermitian case 'hesv'
elif assume_a == 'her':
hecon, hesv, hesv_lw = get_lapack_funcs(('hecon', 'hesv',
'hesv_lwork'), (a1, b1))
lwork = _compute_lwork(hesv_lw, n, lower)
lu, ipvt, x, info = hesv(a1, b1, lwork=lwork,
lower=lower,
overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
_solve_check(n, info)
rcond, info = hecon(lu, ipvt, anorm)
# Symmetric case 'sysv'
elif assume_a == 'sym':
sycon, sysv, sysv_lw = get_lapack_funcs(('sycon', 'sysv',
'sysv_lwork'), (a1, b1))
lwork = _compute_lwork(sysv_lw, n, lower)
lu, ipvt, x, info = sysv(a1, b1, lwork=lwork,
lower=lower,
overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
_solve_check(n, info)
rcond, info = sycon(lu, ipvt, anorm)
# Positive definite case 'posv'
else:
pocon, posv = get_lapack_funcs(('pocon', 'posv'),
(a1, b1))
lu, x, info = posv(a1, b1, lower=lower,
overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
_solve_check(n, info)
rcond, info = pocon(lu, anorm)
_solve_check(n, info, lamch, rcond)
if b_is_1D:
x = x.ravel()
return x
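# Illustrative sketch (not part of SciPy; _demo_solve_assume_a is a
# hypothetical name): with a symmetric positive definite matrix,
# assume_a='pos' routes the call to the Cholesky-based ?POSV path.
def _demo_solve_assume_a():
    a = np.array([[4., 2.], [2., 3.]])  # symmetric positive definite
    b = np.array([1., 2.])
    x = solve(a, b, assume_a='pos')
    assert np.allclose(a @ x, b)
    assert np.allclose(x, [-0.125, 0.75])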
def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False,
overwrite_b=False, debug=None, check_finite=True):
"""
Solve the equation `a x = b` for `x`, assuming a is a triangular matrix.
Parameters
----------
a : (M, M) array_like
A triangular matrix
b : (M,) or (M, N) array_like
Right-hand side matrix in `a x = b`
lower : bool, optional
Use only data contained in the lower triangle of `a`.
Default is to use upper triangle.
trans : {0, 1, 2, 'N', 'T', 'C'}, optional
Type of system to solve:
======== =========
trans system
======== =========
0 or 'N' a x = b
1 or 'T' a^T x = b
2 or 'C' a^H x = b
======== =========
unit_diagonal : bool, optional
If True, diagonal elements of `a` are assumed to be 1 and
will not be referenced.
overwrite_b : bool, optional
Allow overwriting data in `b` (may enhance performance)
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, N) ndarray
Solution to the system `a x = b`. Shape of return matches `b`.
Raises
------
LinAlgError
If `a` is singular
Notes
-----
.. versionadded:: 0.9.0
Examples
--------
Solve the lower triangular system a x = b, where::
[3 0 0 0] [4]
a = [2 1 0 0] b = [2]
[1 0 1 0] [4]
[1 1 1 1] [2]
>>> from scipy.linalg import solve_triangular
>>> a = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]])
>>> b = np.array([4, 2, 4, 2])
>>> x = solve_triangular(a, b, lower=True)
>>> x
array([ 1.33333333, -0.66666667, 2.66666667, -1.33333333])
>>> a.dot(x) # Check the result
array([ 4., 2., 4., 2.])
"""
# Deprecate keyword "debug"
if debug is not None:
warn('Use of the "debug" keyword is deprecated '
'and this keyword will be removed in the future '
'versions of SciPy.', DeprecationWarning, stacklevel=2)
a1 = _asarray_validated(a, check_finite=check_finite)
b1 = _asarray_validated(b, check_finite=check_finite)
if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
raise ValueError('expected square matrix')
if a1.shape[0] != b1.shape[0]:
raise ValueError('shapes of a {} and b {} are incompatible'
.format(a1.shape, b1.shape))
overwrite_b = overwrite_b or _datacopied(b1, b)
if debug:
print('solve:overwrite_b=', overwrite_b)
trans = {'N': 0, 'T': 1, 'C': 2}.get(trans, trans)
trtrs, = get_lapack_funcs(('trtrs',), (a1, b1))
if a1.flags.f_contiguous or trans == 2:
x, info = trtrs(a1, b1, overwrite_b=overwrite_b, lower=lower,
trans=trans, unitdiag=unit_diagonal)
else:
# transposed system is solved since trtrs expects Fortran ordering
x, info = trtrs(a1.T, b1, overwrite_b=overwrite_b, lower=not lower,
trans=not trans, unitdiag=unit_diagonal)
if info == 0:
return x
if info > 0:
raise LinAlgError("singular matrix: resolution failed at diagonal %d" %
(info-1))
raise ValueError('illegal value in %dth argument of internal trtrs' %
(-info))
def solve_banded(l_and_u, ab, b, overwrite_ab=False, overwrite_b=False,
debug=None, check_finite=True):
"""
Solve the equation a x = b for x, assuming a is banded matrix.
The matrix a is stored in `ab` using the matrix diagonal ordered form::
ab[u + i - j, j] == a[i,j]
Example of `ab` (shape of a is (6,6), `u` =1, `l` =2)::
* a01 a12 a23 a34 a45
a00 a11 a22 a33 a44 a55
a10 a21 a32 a43 a54 *
a20 a31 a42 a53 * *
Parameters
----------
(l, u) : (integer, integer)
Number of non-zero lower and upper diagonals
ab : (`l` + `u` + 1, M) array_like
Banded matrix
b : (M,) or (M, K) array_like
Right-hand side
overwrite_ab : bool, optional
Discard data in `ab` (may enhance performance)
overwrite_b : bool, optional
Discard data in `b` (may enhance performance)
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, K) ndarray
The solution to the system a x = b. Returned shape depends on the
shape of `b`.
Examples
--------
Solve the banded system a x = b, where::
[5 2 -1 0 0] [0]
[1 4 2 -1 0] [1]
a = [0 1 3 2 -1] b = [2]
[0 0 1 2 2] [2]
[0 0 0 1 1] [3]
There is one nonzero diagonal below the main diagonal (l = 1), and
two above (u = 2). The diagonal banded form of the matrix is::
[* * -1 -1 -1]
ab = [* 2 2 2 2]
[5 4 3 2 1]
[1 1 1 1 *]
>>> from scipy.linalg import solve_banded
>>> ab = np.array([[0, 0, -1, -1, -1],
... [0, 2, 2, 2, 2],
... [5, 4, 3, 2, 1],
... [1, 1, 1, 1, 0]])
>>> b = np.array([0, 1, 2, 2, 3])
>>> x = solve_banded((1, 2), ab, b)
>>> x
array([-2.37288136, 3.93220339, -4. , 4.3559322 , -1.3559322 ])
"""
# Deprecate keyword "debug"
if debug is not None:
warn('Use of the "debug" keyword is deprecated '
'and this keyword will be removed in the future '
'versions of SciPy.', DeprecationWarning, stacklevel=2)
a1 = _asarray_validated(ab, check_finite=check_finite, as_inexact=True)
b1 = _asarray_validated(b, check_finite=check_finite, as_inexact=True)
# Validate shapes.
if a1.shape[-1] != b1.shape[0]:
raise ValueError("shapes of ab and b are not compatible.")
(nlower, nupper) = l_and_u
if nlower + nupper + 1 != a1.shape[0]:
raise ValueError("invalid values for the number of lower and upper "
"diagonals: l+u+1 (%d) does not equal ab.shape[0] "
"(%d)" % (nlower + nupper + 1, ab.shape[0]))
overwrite_b = overwrite_b or _datacopied(b1, b)
if a1.shape[-1] == 1:
b2 = np.array(b1, copy=(not overwrite_b))
b2 /= a1[1, 0]
return b2
if nlower == nupper == 1:
overwrite_ab = overwrite_ab or _datacopied(a1, ab)
gtsv, = get_lapack_funcs(('gtsv',), (a1, b1))
du = a1[0, 1:]
d = a1[1, :]
dl = a1[2, :-1]
du2, d, du, x, info = gtsv(dl, d, du, b1, overwrite_ab, overwrite_ab,
overwrite_ab, overwrite_b)
else:
gbsv, = get_lapack_funcs(('gbsv',), (a1, b1))
a2 = np.zeros((2*nlower + nupper + 1, a1.shape[1]), dtype=gbsv.dtype)
a2[nlower:, :] = a1
lu, piv, x, info = gbsv(nlower, nupper, a2, b1, overwrite_ab=True,
overwrite_b=overwrite_b)
if info == 0:
return x
if info > 0:
raise LinAlgError("singular matrix")
raise ValueError('illegal value in %d-th argument of internal '
'gbsv/gtsv' % -info)
def solveh_banded(ab, b, overwrite_ab=False, overwrite_b=False, lower=False,
check_finite=True):
"""
    Solve the equation a x = b, where a is a Hermitian positive-definite
    banded matrix.
The matrix a is stored in `ab` either in lower diagonal or upper
diagonal ordered form:
ab[u + i - j, j] == a[i,j] (if upper form; i <= j)
ab[ i - j, j] == a[i,j] (if lower form; i >= j)
Example of `ab` (shape of a is (6, 6), `u` =2)::
upper form:
* * a02 a13 a24 a35
* a01 a12 a23 a34 a45
a00 a11 a22 a33 a44 a55
lower form:
a00 a11 a22 a33 a44 a55
a10 a21 a32 a43 a54 *
a20 a31 a42 a53 * *
Cells marked with * are not used.
Parameters
----------
ab : (`u` + 1, M) array_like
Banded matrix
b : (M,) or (M, K) array_like
Right-hand side
overwrite_ab : bool, optional
Discard data in `ab` (may enhance performance)
overwrite_b : bool, optional
Discard data in `b` (may enhance performance)
lower : bool, optional
Is the matrix in the lower form. (Default is upper form)
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, K) ndarray
The solution to the system a x = b. Shape of return matches shape
of `b`.
Examples
--------
Solve the banded system A x = b, where::
[ 4 2 -1 0 0 0] [1]
[ 2 5 2 -1 0 0] [2]
A = [-1 2 6 2 -1 0] b = [2]
[ 0 -1 2 7 2 -1] [3]
[ 0 0 -1 2 8 2] [3]
[ 0 0 0 -1 2 9] [3]
>>> from scipy.linalg import solveh_banded
`ab` contains the main diagonal and the nonzero diagonals below the
main diagonal. That is, we use the lower form:
>>> ab = np.array([[ 4, 5, 6, 7, 8, 9],
... [ 2, 2, 2, 2, 2, 0],
... [-1, -1, -1, -1, 0, 0]])
>>> b = np.array([1, 2, 2, 3, 3, 3])
>>> x = solveh_banded(ab, b, lower=True)
>>> x
array([ 0.03431373, 0.45938375, 0.05602241, 0.47759104, 0.17577031,
0.34733894])
Solve the Hermitian banded system H x = b, where::
[ 8 2-1j 0 0 ] [ 1 ]
H = [2+1j 5 1j 0 ] b = [1+1j]
[ 0 -1j 9 -2-1j] [1-2j]
[ 0 0 -2+1j 6 ] [ 0 ]
In this example, we put the upper diagonals in the array `hb`:
>>> hb = np.array([[0, 2-1j, 1j, -2-1j],
... [8, 5, 9, 6 ]])
>>> b = np.array([1, 1+1j, 1-2j, 0])
>>> x = solveh_banded(hb, b)
>>> x
array([ 0.07318536-0.02939412j, 0.11877624+0.17696461j,
0.10077984-0.23035393j, -0.00479904-0.09358128j])
"""
a1 = _asarray_validated(ab, check_finite=check_finite)
b1 = _asarray_validated(b, check_finite=check_finite)
# Validate shapes.
if a1.shape[-1] != b1.shape[0]:
raise ValueError("shapes of ab and b are not compatible.")
overwrite_b = overwrite_b or _datacopied(b1, b)
overwrite_ab = overwrite_ab or _datacopied(a1, ab)
if a1.shape[0] == 2:
ptsv, = get_lapack_funcs(('ptsv',), (a1, b1))
if lower:
d = a1[0, :].real
e = a1[1, :-1]
else:
d = a1[1, :].real
e = a1[0, 1:].conj()
d, du, x, info = ptsv(d, e, b1, overwrite_ab, overwrite_ab,
overwrite_b)
else:
pbsv, = get_lapack_funcs(('pbsv',), (a1, b1))
c, x, info = pbsv(a1, b1, lower=lower, overwrite_ab=overwrite_ab,
overwrite_b=overwrite_b)
if info > 0:
raise LinAlgError("%dth leading minor not positive definite" % info)
if info < 0:
raise ValueError('illegal value in %dth argument of internal '
'pbsv' % -info)
return x
def solve_toeplitz(c_or_cr, b, check_finite=True):
"""Solve a Toeplitz system using Levinson Recursion
The Toeplitz matrix has constant diagonals, with c as its first column
and r as its first row. If r is not given, ``r == conjugate(c)`` is
assumed.
Parameters
----------
c_or_cr : array_like or tuple of (array_like, array_like)
The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
actual shape of ``c``, it will be converted to a 1-D array. If not
supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
of ``r``, it will be converted to a 1-D array.
b : (M,) or (M, K) array_like
Right-hand side in ``T x = b``.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(result entirely NaNs) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, K) ndarray
The solution to the system ``T x = b``. Shape of return matches shape
of `b`.
See Also
--------
toeplitz : Toeplitz matrix
Notes
-----
The solution is computed using Levinson-Durbin recursion, which is faster
than generic least-squares methods, but can be less numerically stable.
Examples
--------
Solve the Toeplitz system T x = b, where::
[ 1 -1 -2 -3] [1]
T = [ 3 1 -1 -2] b = [2]
[ 6 3 1 -1] [2]
[10 6 3 1] [5]
To specify the Toeplitz matrix, only the first column and the first
row are needed.
>>> c = np.array([1, 3, 6, 10]) # First column of T
>>> r = np.array([1, -1, -2, -3]) # First row of T
>>> b = np.array([1, 2, 2, 5])
>>> from scipy.linalg import solve_toeplitz, toeplitz
>>> x = solve_toeplitz((c, r), b)
>>> x
array([ 1.66666667, -1. , -2.66666667, 2.33333333])
Check the result by creating the full Toeplitz matrix and
multiplying it by `x`. We should get `b`.
>>> T = toeplitz(c, r)
>>> T.dot(x)
array([ 1., 2., 2., 5.])
"""
# If numerical stability of this algorithm is a problem, a future
# developer might consider implementing other O(N^2) Toeplitz solvers,
# such as GKO (https://www.jstor.org/stable/2153371) or Bareiss.
r, c, b, dtype, b_shape = _validate_args_for_toeplitz_ops(
c_or_cr, b, check_finite, keep_b_shape=True)
# Form a 1-D array of values to be used in the matrix, containing a
# reversed copy of r[1:], followed by c.
vals = np.concatenate((r[-1:0:-1], c))
if b is None:
raise ValueError('illegal value, `b` is a required argument')
if b.ndim == 1:
x, _ = levinson(vals, np.ascontiguousarray(b))
else:
x = np.column_stack([levinson(vals, np.ascontiguousarray(b[:, i]))[0]
for i in range(b.shape[1])])
x = x.reshape(*b_shape)
return x
def _get_axis_len(aname, a, axis):
ax = axis
if ax < 0:
ax += a.ndim
if 0 <= ax < a.ndim:
return a.shape[ax]
raise ValueError("'%saxis' entry is out of bounds" % (aname,))
def solve_circulant(c, b, singular='raise', tol=None,
caxis=-1, baxis=0, outaxis=0):
"""Solve C x = b for x, where C is a circulant matrix.
`C` is the circulant matrix associated with the vector `c`.
The system is solved by doing division in Fourier space. The
calculation is::
x = ifft(fft(b) / fft(c))
where `fft` and `ifft` are the fast Fourier transform and its inverse,
respectively. For a large vector `c`, this is *much* faster than
solving the system with the full circulant matrix.
Parameters
----------
c : array_like
The coefficients of the circulant matrix.
b : array_like
Right-hand side matrix in ``a x = b``.
singular : str, optional
This argument controls how a near singular circulant matrix is
handled. If `singular` is "raise" and the circulant matrix is
near singular, a `LinAlgError` is raised. If `singular` is
"lstsq", the least squares solution is returned. Default is "raise".
tol : float, optional
If any eigenvalue of the circulant matrix has an absolute value
that is less than or equal to `tol`, the matrix is considered to be
near singular. If not given, `tol` is set to::
tol = abs_eigs.max() * abs_eigs.size * np.finfo(np.float64).eps
where `abs_eigs` is the array of absolute values of the eigenvalues
of the circulant matrix.
caxis : int
When `c` has dimension greater than 1, it is viewed as a collection
of circulant vectors. In this case, `caxis` is the axis of `c` that
holds the vectors of circulant coefficients.
baxis : int
When `b` has dimension greater than 1, it is viewed as a collection
of vectors. In this case, `baxis` is the axis of `b` that holds the
right-hand side vectors.
outaxis : int
When `c` or `b` are multidimensional, the value returned by
`solve_circulant` is multidimensional. In this case, `outaxis` is
the axis of the result that holds the solution vectors.
Returns
-------
x : ndarray
Solution to the system ``C x = b``.
Raises
------
LinAlgError
If the circulant matrix associated with `c` is near singular.
See Also
--------
circulant : circulant matrix
Notes
-----
For a 1-D vector `c` with length `m`, and an array `b`
with shape ``(m, ...)``,
solve_circulant(c, b)
returns the same result as
solve(circulant(c), b)
where `solve` and `circulant` are from `scipy.linalg`.
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.linalg import solve_circulant, solve, circulant, lstsq
>>> c = np.array([2, 2, 4])
>>> b = np.array([1, 2, 3])
>>> solve_circulant(c, b)
array([ 0.75, -0.25, 0.25])
Compare that result to solving the system with `scipy.linalg.solve`:
>>> solve(circulant(c), b)
array([ 0.75, -0.25, 0.25])
A singular example:
>>> c = np.array([1, 1, 0, 0])
>>> b = np.array([1, 2, 3, 4])
Calling ``solve_circulant(c, b)`` will raise a `LinAlgError`. For the
least square solution, use the option ``singular='lstsq'``:
>>> solve_circulant(c, b, singular='lstsq')
array([ 0.25, 1.25, 2.25, 1.25])
Compare to `scipy.linalg.lstsq`:
>>> x, resid, rnk, s = lstsq(circulant(c), b)
>>> x
array([ 0.25, 1.25, 2.25, 1.25])
A broadcasting example:
Suppose we have the vectors of two circulant matrices stored in an array
with shape (2, 5), and three `b` vectors stored in an array with shape
(3, 5). For example,
>>> c = np.array([[1.5, 2, 3, 0, 0], [1, 1, 4, 3, 2]])
>>> b = np.arange(15).reshape(-1, 5)
We want to solve all combinations of circulant matrices and `b` vectors,
with the result stored in an array with shape (2, 3, 5). When we
disregard the axes of `c` and `b` that hold the vectors of coefficients,
the shapes of the collections are (2,) and (3,), respectively, which are
not compatible for broadcasting. To have a broadcast result with shape
(2, 3), we add a trivial dimension to `c`: ``c[:, np.newaxis, :]`` has
shape (2, 1, 5). The last dimension holds the coefficients of the
circulant matrices, so when we call `solve_circulant`, we can use the
default ``caxis=-1``. The coefficients of the `b` vectors are in the last
dimension of the array `b`, so we use ``baxis=-1``. If we use the
default `outaxis`, the result will have shape (5, 2, 3), so we'll use
``outaxis=-1`` to put the solution vectors in the last dimension.
>>> x = solve_circulant(c[:, np.newaxis, :], b, baxis=-1, outaxis=-1)
>>> x.shape
(2, 3, 5)
>>> np.set_printoptions(precision=3) # For compact output of numbers.
>>> x
array([[[-0.118, 0.22 , 1.277, -0.142, 0.302],
[ 0.651, 0.989, 2.046, 0.627, 1.072],
[ 1.42 , 1.758, 2.816, 1.396, 1.841]],
[[ 0.401, 0.304, 0.694, -0.867, 0.377],
[ 0.856, 0.758, 1.149, -0.412, 0.831],
[ 1.31 , 1.213, 1.603, 0.042, 1.286]]])
Check by solving one pair of `c` and `b` vectors (cf. ``x[1, 1, :]``):
>>> solve_circulant(c[1], b[1, :])
array([ 0.856, 0.758, 1.149, -0.412, 0.831])
"""
c = np.atleast_1d(c)
nc = _get_axis_len("c", c, caxis)
b = np.atleast_1d(b)
nb = _get_axis_len("b", b, baxis)
if nc != nb:
raise ValueError('Shapes of c {} and b {} are incompatible'
.format(c.shape, b.shape))
fc = np.fft.fft(np.rollaxis(c, caxis, c.ndim), axis=-1)
abs_fc = np.abs(fc)
if tol is None:
# This is the same tolerance as used in np.linalg.matrix_rank.
tol = abs_fc.max(axis=-1) * nc * np.finfo(np.float64).eps
if tol.shape != ():
tol.shape = tol.shape + (1,)
else:
tol = np.atleast_1d(tol)
near_zeros = abs_fc <= tol
is_near_singular = np.any(near_zeros)
if is_near_singular:
if singular == 'raise':
raise LinAlgError("near singular circulant matrix.")
else:
# Replace the small values with 1 to avoid errors in the
# division fb/fc below.
fc[near_zeros] = 1
fb = np.fft.fft(np.rollaxis(b, baxis, b.ndim), axis=-1)
q = fb / fc
if is_near_singular:
# `near_zeros` is a boolean array, same shape as `c`, that is
# True where `fc` is (near) zero. `q` is the broadcasted result
# of fb / fc, so to set the values of `q` to 0 where `fc` is near
# zero, we use a mask that is the broadcast result of an array
# of True values shaped like `b` with `near_zeros`.
mask = np.ones_like(b, dtype=bool) & near_zeros
q[mask] = 0
x = np.fft.ifft(q, axis=-1)
if not (np.iscomplexobj(c) or np.iscomplexobj(b)):
x = x.real
if outaxis != -1:
x = np.rollaxis(x, -1, outaxis)
return x
# matrix inversion
def inv(a, overwrite_a=False, check_finite=True):
"""
Compute the inverse of a matrix.
Parameters
----------
a : array_like
Square matrix to be inverted.
overwrite_a : bool, optional
Discard data in `a` (may improve performance). Default is False.
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
ainv : ndarray
Inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is singular.
ValueError
If `a` is not square, or not 2D.
Examples
--------
>>> from scipy import linalg
>>> a = np.array([[1., 2.], [3., 4.]])
>>> linalg.inv(a)
array([[-2. , 1. ],
[ 1.5, -0.5]])
>>> np.dot(a, linalg.inv(a))
array([[ 1., 0.],
[ 0., 1.]])
"""
a1 = _asarray_validated(a, check_finite=check_finite)
if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
raise ValueError('expected square matrix')
overwrite_a = overwrite_a or _datacopied(a1, a)
# XXX: I found no advantage or disadvantage of using finv.
# finv, = get_flinalg_funcs(('inv',),(a1,))
# if finv is not None:
# a_inv,info = finv(a1,overwrite_a=overwrite_a)
# if info==0:
# return a_inv
# if info>0: raise LinAlgError, "singular matrix"
# if info<0: raise ValueError('illegal value in %d-th argument of '
# 'internal inv.getrf|getri'%(-info))
getrf, getri, getri_lwork = get_lapack_funcs(('getrf', 'getri',
'getri_lwork'),
(a1,))
lu, piv, info = getrf(a1, overwrite_a=overwrite_a)
if info == 0:
lwork = _compute_lwork(getri_lwork, a1.shape[0])
# XXX: the following line fixes curious SEGFAULT when
# benchmarking 500x500 matrix inverse. This seems to
# be a bug in LAPACK ?getri routine because if lwork is
# minimal (when using lwork[0] instead of lwork[1]) then
# all tests pass. Further investigation is required if
# more such SEGFAULTs occur.
lwork = int(1.01 * lwork)
inv_a, info = getri(lu, piv, lwork=lwork, overwrite_lu=1)
if info > 0:
raise LinAlgError("singular matrix")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal '
'getrf|getri' % -info)
return inv_a
# Determinant
def det(a, overwrite_a=False, check_finite=True):
"""
Compute the determinant of a matrix
The determinant of a square matrix is a value derived arithmetically
from the coefficients of the matrix.
The determinant for a 3x3 matrix, for example, is computed as follows::
a b c
d e f = A
g h i
det(A) = a*e*i + b*f*g + c*d*h - c*e*g - b*d*i - a*f*h
Parameters
----------
a : (M, M) array_like
A square matrix.
overwrite_a : bool, optional
Allow overwriting data in a (may enhance performance).
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
det : float or complex
Determinant of `a`.
Notes
-----
The determinant is computed via LU factorization, LAPACK routine z/dgetrf.
Examples
--------
>>> from scipy import linalg
>>> a = np.array([[1,2,3], [4,5,6], [7,8,9]])
>>> linalg.det(a)
0.0
>>> a = np.array([[0,2,3], [4,5,6], [7,8,9]])
>>> linalg.det(a)
3.0
"""
a1 = _asarray_validated(a, check_finite=check_finite)
if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
raise ValueError('expected square matrix')
overwrite_a = overwrite_a or _datacopied(a1, a)
fdet, = get_flinalg_funcs(('det',), (a1,))
a_det, info = fdet(a1, overwrite_a=overwrite_a)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal '
'det.getrf' % -info)
return a_det
# Linear Least Squares
def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False,
check_finite=True, lapack_driver=None):
"""
Compute least-squares solution to equation Ax = b.
Compute a vector x such that the 2-norm ``|b - A x|`` is minimized.
Parameters
----------
a : (M, N) array_like
Left-hand side array
b : (M,) or (M, K) array_like
Right hand side array
cond : float, optional
Cutoff for 'small' singular values; used to determine effective
rank of a. Singular values smaller than
        ``cond * largest_singular_value`` are considered zero.
overwrite_a : bool, optional
Discard data in `a` (may enhance performance). Default is False.
overwrite_b : bool, optional
Discard data in `b` (may enhance performance). Default is False.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
lapack_driver : str, optional
Which LAPACK driver is used to solve the least-squares problem.
Options are ``'gelsd'``, ``'gelsy'``, ``'gelss'``. Default
(``'gelsd'``) is a good choice. However, ``'gelsy'`` can be slightly
faster on many problems. ``'gelss'`` was used historically. It is
generally slow but uses less memory.
.. versionadded:: 0.17.0
Returns
-------
x : (N,) or (N, K) ndarray
Least-squares solution. Return shape matches shape of `b`.
residues : (K,) ndarray or float
Square of the 2-norm for each column in ``b - a x``, if ``M > N`` and
        ``rank(A) == n`` (returns a scalar if b is 1-D). Otherwise a
(0,)-shaped array is returned.
rank : int
Effective rank of `a`.
s : (min(M, N),) ndarray or None
Singular values of `a`. The condition number of a is
``abs(s[0] / s[-1])``.
Raises
------
LinAlgError
If computation does not converge.
ValueError
When parameters are not compatible.
See Also
--------
scipy.optimize.nnls : linear least squares with non-negativity constraint
Notes
-----
When ``'gelsy'`` is used as a driver, `residues` is set to a (0,)-shaped
array and `s` is always ``None``.
Examples
--------
>>> from scipy.linalg import lstsq
>>> import matplotlib.pyplot as plt
Suppose we have the following data:
>>> x = np.array([1, 2.5, 3.5, 4, 5, 7, 8.5])
>>> y = np.array([0.3, 1.1, 1.5, 2.0, 3.2, 6.6, 8.6])
We want to fit a quadratic polynomial of the form ``y = a + b*x**2``
to this data. We first form the "design matrix" M, with a constant
column of 1s and a column containing ``x**2``:
>>> M = x[:, np.newaxis]**[0, 2]
>>> M
array([[ 1. , 1. ],
[ 1. , 6.25],
[ 1. , 12.25],
[ 1. , 16. ],
[ 1. , 25. ],
[ 1. , 49. ],
[ 1. , 72.25]])
We want to find the least-squares solution to ``M.dot(p) = y``,
where ``p`` is a vector with length 2 that holds the parameters
``a`` and ``b``.
>>> p, res, rnk, s = lstsq(M, y)
>>> p
array([ 0.20925829, 0.12013861])
Plot the data and the fitted curve.
>>> plt.plot(x, y, 'o', label='data')
>>> xx = np.linspace(0, 9, 101)
>>> yy = p[0] + p[1]*xx**2
>>> plt.plot(xx, yy, label='least squares fit, $y = a + bx^2$')
>>> plt.xlabel('x')
>>> plt.ylabel('y')
>>> plt.legend(framealpha=1, shadow=True)
>>> plt.grid(alpha=0.25)
>>> plt.show()
"""
a1 = _asarray_validated(a, check_finite=check_finite)
b1 = _asarray_validated(b, check_finite=check_finite)
if len(a1.shape) != 2:
raise ValueError('Input array a should be 2D')
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
if m != b1.shape[0]:
raise ValueError('Shape mismatch: a and b should have the same number'
' of rows ({} != {}).'.format(m, b1.shape[0]))
if m == 0 or n == 0: # Zero-sized problem, confuses LAPACK
x = np.zeros((n,) + b1.shape[1:], dtype=np.common_type(a1, b1))
if n == 0:
residues = np.linalg.norm(b1, axis=0)**2
else:
residues = np.empty((0,))
return x, residues, 0, np.empty((0,))
driver = lapack_driver
if driver is None:
driver = lstsq.default_lapack_driver
if driver not in ('gelsd', 'gelsy', 'gelss'):
raise ValueError('LAPACK driver "%s" is not found' % driver)
lapack_func, lapack_lwork = get_lapack_funcs((driver,
'%s_lwork' % driver),
(a1, b1))
real_data = True if (lapack_func.dtype.kind == 'f') else False
if m < n:
# need to extend b matrix as it will be filled with
# a larger solution matrix
if len(b1.shape) == 2:
b2 = np.zeros((n, nrhs), dtype=lapack_func.dtype)
b2[:m, :] = b1
else:
b2 = np.zeros(n, dtype=lapack_func.dtype)
b2[:m] = b1
b1 = b2
overwrite_a = overwrite_a or _datacopied(a1, a)
overwrite_b = overwrite_b or _datacopied(b1, b)
if cond is None:
cond = np.finfo(lapack_func.dtype).eps
if driver in ('gelss', 'gelsd'):
if driver == 'gelss':
lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
v, x, s, rank, work, info = lapack_func(a1, b1, cond, lwork,
overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
elif driver == 'gelsd':
if real_data:
lwork, iwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
x, s, rank, info = lapack_func(a1, b1, lwork,
iwork, cond, False, False)
else: # complex data
lwork, rwork, iwork = _compute_lwork(lapack_lwork, m, n,
nrhs, cond)
x, s, rank, info = lapack_func(a1, b1, lwork, rwork, iwork,
cond, False, False)
if info > 0:
raise LinAlgError("SVD did not converge in Linear Least Squares")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal %s'
% (-info, lapack_driver))
resids = np.asarray([], dtype=x.dtype)
if m > n:
x1 = x[:n]
if rank == n:
resids = np.sum(np.abs(x[n:])**2, axis=0)
x = x1
return x, resids, rank, s
elif driver == 'gelsy':
lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = lapack_func(a1, b1, jptv, cond,
lwork, False, False)
if info < 0:
raise ValueError("illegal value in %d-th argument of internal "
"gelsy" % -info)
if m > n:
x1 = x[:n]
x = x1
return x, np.array([], x.dtype), rank, None
lstsq.default_lapack_driver = 'gelsd'
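# Illustrative sketch (not from the SciPy docs; _demo_lstsq_drivers is a
# hypothetical name): the same overdetermined system solved with two LAPACK
# drivers. As noted above, 'gelsy' does not return singular values.
def _demo_lstsq_drivers():
    a = np.array([[1., 1.], [1., 2.], [1., 3.]])
    b = np.array([1., 2., 2.])
    x_d, res_d, rank_d, s_d = lstsq(a, b, lapack_driver='gelsd')
    x_y, res_y, rank_y, s_y = lstsq(a, b, lapack_driver='gelsy')
    assert np.allclose(x_d, x_y) and rank_d == rank_y == 2
    assert s_y is None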
def pinv(a, atol=None, rtol=None, return_rank=False, check_finite=True,
cond=None, rcond=None):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate a generalized inverse of a matrix using its
singular-value decomposition ``U @ S @ V`` in the economy mode and picking
up only the columns/rows that are associated with significant singular
values.
If ``s`` is the maximum singular value of ``a``, then the
significance cut-off value is determined by ``atol + rtol * s``. Any
singular value below this value is assumed insignificant.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
atol: float, optional
Absolute threshold term, default value is 0.
.. versionadded:: 1.7.0
rtol: float, optional
Relative threshold term, default value is ``max(M, N) * eps`` where
``eps`` is the machine precision value of the datatype of ``a``.
.. versionadded:: 1.7.0
return_rank : bool, optional
If True, return the effective rank of the matrix.
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
cond, rcond : float, optional
In older versions, these values were meant to be used as ``atol`` with
``rtol=0``. If both were given ``rcond`` overwrote ``cond`` and hence
the code was not correct. Thus using these are strongly discouraged and
the tolerances above are recommended instead. In fact, if provided,
atol, rtol takes precedence over these keywords.
.. versionchanged:: 1.7.0
Deprecated in favor of ``rtol`` and ``atol`` parameters above and
will be removed in future versions of SciPy.
.. versionchanged:: 1.3.0
Previously the default cutoff value was just ``eps*f`` where ``f``
was ``1e3`` for single precision and ``1e6`` for double precision.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of matrix `a`.
rank : int
The effective rank of the matrix. Returned if `return_rank` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Examples
--------
>>> from scipy import linalg
>>> rng = np.random.default_rng()
>>> a = rng.standard_normal((9, 6))
>>> B = linalg.pinv(a)
>>> np.allclose(a, a @ B @ a)
True
>>> np.allclose(B, B @ a @ B)
True
"""
a = _asarray_validated(a, check_finite=check_finite)
u, s, vh = decomp_svd.svd(a, full_matrices=False, check_finite=False)
t = u.dtype.char.lower()
maxS = np.max(s)
if rcond or cond:
warn('Use of the "cond" and "rcond" keywords are deprecated and '
'will be removed in future versions of SciPy. Use "atol" and '
'"rtol" keywords instead', DeprecationWarning, stacklevel=2)
    # backwards compatible only if atol and rtol are both missing
if (rcond or cond) and (atol is None) and (rtol is None):
atol = rcond or cond
rtol = 0.
atol = 0. if atol is None else atol
rtol = max(a.shape) * np.finfo(t).eps if (rtol is None) else rtol
if (atol < 0.) or (rtol < 0.):
raise ValueError("atol and rtol values must be positive.")
val = atol + maxS * rtol
rank = np.sum(s > val)
u = u[:, :rank]
u /= s[:rank]
B = (u @ vh[:rank]).conj().T
if return_rank:
return B, rank
else:
return B
def pinv2(a, cond=None, rcond=None, return_rank=False, check_finite=True):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
`scipy.linalg.pinv2` is deprecated since SciPy 1.7.0, use
`scipy.linalg.pinv` instead for better tolerance control.
Calculate a generalized inverse of a matrix using its
singular-value decomposition and including all 'large' singular
values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
cond, rcond : float or None
Cutoff for 'small' singular values; singular values smaller than this
value are considered as zero. If both are omitted, the default value
``max(M,N)*largest_singular_value*eps`` is used where ``eps`` is the
machine precision value of the datatype of ``a``.
.. versionchanged:: 1.3.0
Previously the default cutoff value was just ``eps*f`` where ``f``
was ``1e3`` for single precision and ``1e6`` for double precision.
return_rank : bool, optional
If True, return the effective rank of the matrix.
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of matrix `a`.
rank : int
The effective rank of the matrix. Returned if `return_rank` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
"""
# SciPy 1.7.0 2021-04-10
warn('scipy.linalg.pinv2 is deprecated since SciPy 1.7.0, use '
'scipy.linalg.pinv instead', DeprecationWarning, stacklevel=2)
if rcond is not None:
cond = rcond
return pinv(a=a, atol=cond, rtol=None, return_rank=return_rank,
check_finite=check_finite)
def pinvh(a, atol=None, rtol=None, lower=True, return_rank=False,
check_finite=True, cond=None, rcond=None):
"""
Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix.
    Calculate a generalized inverse of a complex Hermitian/real symmetric
matrix using its eigenvalue decomposition and including all eigenvalues
with 'large' absolute value.
Parameters
----------
a : (N, N) array_like
        Real symmetric or complex Hermitian matrix to be pseudo-inverted.
atol: float, optional
Absolute threshold term, default value is 0.
.. versionadded:: 1.7.0
rtol: float, optional
Relative threshold term, default value is ``N * eps`` where
``eps`` is the machine precision value of the datatype of ``a``.
.. versionadded:: 1.7.0
lower : bool, optional
Whether the pertinent array data is taken from the lower or upper
triangle of `a`. (Default: lower)
return_rank : bool, optional
If True, return the effective rank of the matrix.
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
cond, rcond : float, optional
In older versions, these values were meant to be used as ``atol`` with
``rtol=0``. If both were given ``rcond`` overwrote ``cond`` and hence
        the code was not correct. Thus, using these is strongly discouraged, and
        the tolerances above are recommended instead. In fact, if provided,
        ``atol`` and ``rtol`` take precedence over these keywords.
.. versionchanged:: 1.7.0
Deprecated in favor of ``rtol`` and ``atol`` parameters above and
will be removed in future versions of SciPy.
.. versionchanged:: 1.3.0
Previously the default cutoff value was just ``eps*f`` where ``f``
was ``1e3`` for single precision and ``1e6`` for double precision.
Returns
-------
B : (N, N) ndarray
The pseudo-inverse of matrix `a`.
rank : int
The effective rank of the matrix. Returned if `return_rank` is True.
Raises
------
LinAlgError
If eigenvalue algorithm does not converge.
Examples
--------
>>> from scipy.linalg import pinvh
>>> rng = np.random.default_rng()
>>> a = rng.standard_normal((9, 6))
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, a @ B @ a)
True
>>> np.allclose(B, B @ a @ B)
True
"""
a = _asarray_validated(a, check_finite=check_finite)
s, u = decomp.eigh(a, lower=lower, check_finite=False)
t = u.dtype.char.lower()
maxS = np.max(np.abs(s))
if rcond or cond:
warn('Use of the "cond" and "rcond" keywords are deprecated and '
'will be removed in future versions of SciPy. Use "atol" and '
'"rtol" keywords instead', DeprecationWarning, stacklevel=2)
    # backwards compatibility: fall back to cond/rcond only when both atol and rtol are missing
if (rcond or cond) and (atol is None) and (rtol is None):
atol = rcond or cond
rtol = 0.
atol = 0. if atol is None else atol
rtol = max(a.shape) * np.finfo(t).eps if (rtol is None) else rtol
if (atol < 0.) or (rtol < 0.):
raise ValueError("atol and rtol values must be positive.")
val = atol + maxS * rtol
above_cutoff = (abs(s) > val)
psigma_diag = 1.0 / s[above_cutoff]
u = u[:, above_cutoff]
B = (u * psigma_diag) @ u.conj().T
if return_rank:
return B, len(psigma_diag)
else:
return B
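def _demo_pinvh_identity():
    # Illustrative sketch (added commentary, not part of SciPy): the
    # eigendecomposition-based pseudo-inverse from `pinvh` satisfies the
    # Moore-Penrose identities on a rank-deficient symmetric matrix.
    rng = np.random.default_rng(1)
    x = rng.standard_normal((6, 4))
    a = x @ x.T                            # symmetric, shape (6, 6), rank 4
    B, rank = pinvh(a, return_rank=True)   # rank is typically 4 here
    assert np.allclose(a @ B @ a, a)
    assert np.allclose(B, B.T)             # the pseudo-inverse stays symmetric
    return rank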
def matrix_balance(A, permute=True, scale=True, separate=False,
overwrite_a=False):
"""
Compute a diagonal similarity transformation for row/column balancing.
The balancing tries to equalize the row and column 1-norms by applying
a similarity transformation such that the magnitude variation of the
matrix entries is reflected to the scaling matrices.
Moreover, if enabled, the matrix is first permuted to isolate the upper
triangular parts of the matrix and, again if scaling is also enabled,
only the remaining subblocks are subjected to scaling.
The balanced matrix satisfies the following equality
.. math::
B = T^{-1} A T
The scaling coefficients are approximated to the nearest power of 2
to avoid round-off errors.
Parameters
----------
A : (n, n) array_like
Square data matrix for the balancing.
permute : bool, optional
The selector to define whether permutation of A is also performed
prior to scaling.
scale : bool, optional
The selector to turn on and off the scaling. If False, the matrix
will not be scaled.
separate : bool, optional
This switches from returning a full matrix of the transformation
to a tuple of two separate 1-D permutation and scaling arrays.
overwrite_a : bool, optional
This is passed to xGEBAL directly. Essentially, overwrites the result
to the data. It might increase the space efficiency. See LAPACK manual
for details. This is False by default.
Returns
-------
B : (n, n) ndarray
Balanced matrix
T : (n, n) ndarray
A possibly permuted diagonal matrix whose nonzero entries are
integer powers of 2 to avoid numerical truncation errors.
scale, perm : (n,) ndarray
If ``separate`` keyword is set to True then instead of the array
``T`` above, the scaling and the permutation vectors are given
separately as a tuple without allocating the full array ``T``.
Notes
-----
This algorithm is particularly useful for eigenvalue and matrix
decompositions and in many cases it is already called by various
LAPACK routines.
The algorithm is based on the well-known technique of [1]_ and has
been modified to account for special cases. See [2]_ for details
which have been implemented since LAPACK v3.5.0. Before this version
there are corner cases where balancing can actually worsen the
conditioning. See [3]_ for such examples.
The code is a wrapper around LAPACK's xGEBAL routine family for matrix
balancing.
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy import linalg
>>> x = np.array([[1,2,0], [9,1,0.01], [1,2,10*np.pi]])
>>> y, permscale = linalg.matrix_balance(x)
>>> np.abs(x).sum(axis=0) / np.abs(x).sum(axis=1)
array([ 3.66666667, 0.4995005 , 0.91312162])
>>> np.abs(y).sum(axis=0) / np.abs(y).sum(axis=1)
array([ 1.2 , 1.27041742, 0.92658316]) # may vary
>>> permscale # only powers of 2 (0.5 == 2^(-1))
array([[ 0.5, 0. , 0. ], # may vary
[ 0. , 1. , 0. ],
[ 0. , 0. , 1. ]])
References
----------
.. [1] : B.N. Parlett and C. Reinsch, "Balancing a Matrix for
Calculation of Eigenvalues and Eigenvectors", Numerische Mathematik,
Vol.13(4), 1969, :doi:`10.1007/BF02165404`
.. [2] : R. James, J. Langou, B.R. Lowery, "On matrix balancing and
eigenvector computation", 2014, :arxiv:`1401.5766`
.. [3] : D.S. Watkins. A case where balancing is harmful.
Electron. Trans. Numer. Anal, Vol.23, 2006.
"""
A = np.atleast_2d(_asarray_validated(A, check_finite=True))
if not np.equal(*A.shape):
raise ValueError('The data matrix for balancing should be square.')
gebal = get_lapack_funcs(('gebal'), (A,))
B, lo, hi, ps, info = gebal(A, scale=scale, permute=permute,
overwrite_a=overwrite_a)
if info < 0:
raise ValueError('xGEBAL exited with the internal error '
'"illegal value in argument number {}.". See '
'LAPACK documentation for the xGEBAL error codes.'
''.format(-info))
# Separate the permutations from the scalings and then convert to int
scaling = np.ones_like(ps, dtype=float)
scaling[lo:hi+1] = ps[lo:hi+1]
# gebal uses 1-indexing
ps = ps.astype(int, copy=False) - 1
n = A.shape[0]
perm = np.arange(n)
# LAPACK permutes with the ordering n --> hi, then 0--> lo
if hi < n:
for ind, x in enumerate(ps[hi+1:][::-1], 1):
if n-ind == x:
continue
perm[[x, n-ind]] = perm[[n-ind, x]]
if lo > 0:
for ind, x in enumerate(ps[:lo]):
if ind == x:
continue
perm[[x, ind]] = perm[[ind, x]]
if separate:
return B, (scaling, perm)
# get the inverse permutation
iperm = np.empty_like(perm)
iperm[perm] = np.arange(n)
return B, np.diag(scaling)[iperm, :]
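def _demo_matrix_balance_similarity():
    # Illustrative sketch (added commentary, not part of SciPy): the pair
    # returned by `matrix_balance` satisfies B = inv(T) @ A @ T, and the
    # diagonal of T holds exact powers of two, so no rounding is introduced.
    A = np.array([[1., 2., 0.], [9., 1., 0.01], [1., 2., 10.]])
    B, T = matrix_balance(A)
    assert np.allclose(np.linalg.solve(T, A) @ T, B)
    d = np.diag(T)[np.diag(T) > 0]
    assert np.allclose(np.log2(d), np.round(np.log2(d)))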
def _validate_args_for_toeplitz_ops(c_or_cr, b, check_finite, keep_b_shape,
enforce_square=True):
"""Validate arguments and format inputs for toeplitz functions
Parameters
----------
c_or_cr : array_like or tuple of (array_like, array_like)
The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
actual shape of ``c``, it will be converted to a 1-D array. If not
supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
of ``r``, it will be converted to a 1-D array.
b : (M,) or (M, K) array_like
Right-hand side in ``T x = b``.
check_finite : bool
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(result entirely NaNs) if the inputs do contain infinities or NaNs.
    keep_b_shape : bool
        Whether to keep a (M,) dimensional ``b`` in its original shape. If
        False, a 1-D ``b`` is converted to a (M, 1) matrix.
enforce_square: bool, optional
If True (default), this verifies that the Toeplitz matrix is square.
Returns
-------
r : array
1d array corresponding to the first row of the Toeplitz matrix.
c: array
1d array corresponding to the first column of the Toeplitz matrix.
b: array
(M,), (M, 1) or (M, K) dimensional array, post validation,
corresponding to ``b``.
dtype: numpy datatype
``dtype`` stores the datatype of ``r``, ``c`` and ``b``. If any of
``r``, ``c`` or ``b`` are complex, ``dtype`` is ``np.complex128``,
        otherwise, it is ``np.float64``.
b_shape: tuple
Shape of ``b`` after passing it through ``_asarray_validated``.
"""
if isinstance(c_or_cr, tuple):
c, r = c_or_cr
c = _asarray_validated(c, check_finite=check_finite).ravel()
r = _asarray_validated(r, check_finite=check_finite).ravel()
else:
c = _asarray_validated(c_or_cr, check_finite=check_finite).ravel()
r = c.conjugate()
if b is None:
raise ValueError('`b` must be an array, not None.')
b = _asarray_validated(b, check_finite=check_finite)
b_shape = b.shape
is_not_square = r.shape[0] != c.shape[0]
if (enforce_square and is_not_square) or b.shape[0] != r.shape[0]:
raise ValueError('Incompatible dimensions.')
is_cmplx = np.iscomplexobj(r) or np.iscomplexobj(c) or np.iscomplexobj(b)
dtype = np.complex128 if is_cmplx else np.double
r, c, b = (np.asarray(i, dtype=dtype) for i in (r, c, b))
if b.ndim == 1 and not keep_b_shape:
b = b.reshape(-1, 1)
elif b.ndim != 1:
b = b.reshape(b.shape[0], -1)
return r, c, b, dtype, b_shape
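def _demo_validate_toeplitz_args():
    # Illustrative sketch (added commentary, not part of SciPy): when only
    # ``c`` is given, the first row defaults to conjugate(c), and a 1-D ``b``
    # is promoted to a column unless ``keep_b_shape`` is set.
    c = np.array([1 + 2j, 3, 5])
    b = np.array([1., 2., 3.])
    r, c_out, b_out, dtype, b_shape = _validate_args_for_toeplitz_ops(
        c, b, check_finite=True, keep_b_shape=False)
    assert np.allclose(r, c.conjugate())
    assert b_out.shape == (3, 1) and b_shape == (3,)
    assert dtype == np.complex128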
def matmul_toeplitz(c_or_cr, x, check_finite=False, workers=None):
"""Efficient Toeplitz Matrix-Matrix Multiplication using FFT
This function returns the matrix multiplication between a Toeplitz
matrix and a dense matrix.
The Toeplitz matrix has constant diagonals, with c as its first column
and r as its first row. If r is not given, ``r == conjugate(c)`` is
assumed.
Parameters
----------
c_or_cr : array_like or tuple of (array_like, array_like)
The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
actual shape of ``c``, it will be converted to a 1-D array. If not
supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
of ``r``, it will be converted to a 1-D array.
x : (M,) or (M, K) array_like
Matrix with which to multiply.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(result entirely NaNs) if the inputs do contain infinities or NaNs.
workers : int, optional
To pass to scipy.fft.fft and ifft. Maximum number of workers to use
for parallel computation. If negative, the value wraps around from
``os.cpu_count()``. See scipy.fft.fft for more details.
Returns
-------
T @ x : (M,) or (M, K) ndarray
The result of the matrix multiplication ``T @ x``. Shape of return
matches shape of `x`.
See Also
--------
toeplitz : Toeplitz matrix
solve_toeplitz : Solve a Toeplitz system using Levinson Recursion
Notes
-----
The Toeplitz matrix is embedded in a circulant matrix and the FFT is used
to efficiently calculate the matrix-matrix product.
Because the computation is based on the FFT, integer inputs will
result in floating point outputs. This is unlike NumPy's `matmul`,
which preserves the data type of the input.
This is partly based on the implementation that can be found in [1]_,
licensed under the MIT license. More information about the method can be
found in reference [2]_. References [3]_ and [4]_ have more reference
implementations in Python.
.. versionadded:: 1.6.0
References
----------
.. [1] Jacob R Gardner, Geoff Pleiss, David Bindel, Kilian
Q Weinberger, Andrew Gordon Wilson, "GPyTorch: Blackbox Matrix-Matrix
Gaussian Process Inference with GPU Acceleration" with contributions
from Max Balandat and Ruihan Wu. Available online:
https://github.com/cornellius-gp/gpytorch
.. [2] J. Demmel, P. Koev, and X. Li, "A Brief Survey of Direct Linear
Solvers". In Z. Bai, J. Demmel, J. Dongarra, A. Ruhe, and H. van der
Vorst, editors. Templates for the Solution of Algebraic Eigenvalue
Problems: A Practical Guide. SIAM, Philadelphia, 2000. Available at:
http://www.netlib.org/utk/people/JackDongarra/etemplates/node384.html
.. [3] R. Scheibler, E. Bezzam, I. Dokmanic, Pyroomacoustics: A Python
package for audio room simulations and array processing algorithms,
Proc. IEEE ICASSP, Calgary, CA, 2018.
https://github.com/LCAV/pyroomacoustics/blob/pypi-release/
pyroomacoustics/adaptive/util.py
.. [4] Marano S, Edwards B, Ferrari G and Fah D (2017), "Fitting
Earthquake Spectra: Colored Noise and Incomplete Data", Bulletin of
the Seismological Society of America., January, 2017. Vol. 107(1),
pp. 276-291.
Examples
--------
Multiply the Toeplitz matrix T with matrix x::
[ 1 -1 -2 -3] [1 10]
T = [ 3 1 -1 -2] x = [2 11]
[ 6 3 1 -1] [2 11]
[10 6 3 1] [5 19]
To specify the Toeplitz matrix, only the first column and the first
row are needed.
>>> c = np.array([1, 3, 6, 10]) # First column of T
>>> r = np.array([1, -1, -2, -3]) # First row of T
>>> x = np.array([[1, 10], [2, 11], [2, 11], [5, 19]])
>>> from scipy.linalg import toeplitz, matmul_toeplitz
>>> matmul_toeplitz((c, r), x)
array([[-20., -80.],
[ -7., -8.],
[ 9., 85.],
[ 33., 218.]])
Check the result by creating the full Toeplitz matrix and
multiplying it by ``x``.
>>> toeplitz(c, r) @ x
array([[-20, -80],
[ -7, -8],
[ 9, 85],
[ 33, 218]])
The full matrix is never formed explicitly, so this routine
is suitable for very large Toeplitz matrices.
>>> n = 1000000
>>> matmul_toeplitz([1] + [0]*(n-1), np.ones(n))
array([1., 1., 1., ..., 1., 1., 1.])
"""
from ..fft import fft, ifft, rfft, irfft
r, c, x, dtype, x_shape = _validate_args_for_toeplitz_ops(
c_or_cr, x, check_finite, keep_b_shape=False, enforce_square=False)
n, m = x.shape
T_nrows = len(c)
T_ncols = len(r)
p = T_nrows + T_ncols - 1 # equivalent to len(embedded_col)
embedded_col = np.concatenate((c, r[-1:0:-1]))
if np.iscomplexobj(embedded_col) or np.iscomplexobj(x):
fft_mat = fft(embedded_col, axis=0, workers=workers).reshape(-1, 1)
fft_x = fft(x, n=p, axis=0, workers=workers)
mat_times_x = ifft(fft_mat*fft_x, axis=0,
workers=workers)[:T_nrows, :]
else:
# Real inputs; using rfft is faster
fft_mat = rfft(embedded_col, axis=0, workers=workers).reshape(-1, 1)
fft_x = rfft(x, n=p, axis=0, workers=workers)
mat_times_x = irfft(fft_mat*fft_x, axis=0,
workers=workers, n=p)[:T_nrows, :]
return_shape = (T_nrows,) if len(x_shape) == 1 else (T_nrows, m)
return mat_times_x.reshape(*return_shape)
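def _demo_circulant_embedding():
    # Illustrative sketch (added commentary, not part of SciPy) of the idea
    # behind `matmul_toeplitz`: embed the Toeplitz matrix in a circulant one,
    # whose action is a single elementwise product in the FFT domain.
    from scipy.fft import fft, ifft
    from scipy.linalg import toeplitz
    c = np.array([1., 3., 6., 10.])            # first column of T
    r = np.array([1., -1., -2., -3.])          # first row of T
    x = np.array([1., 2., 2., 5.])
    p = len(c) + len(r) - 1
    embedded_col = np.concatenate((c, r[-1:0:-1]))   # circulant's first column
    y = ifft(fft(embedded_col) * fft(x, n=p)).real[:len(c)]
    assert np.allclose(y, toeplitz(c, r) @ x)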
|
WarrenWeckesser/scipy
|
scipy/linalg/basic.py
|
Python
|
bsd-3-clause
| 67,094
|
[
"Gaussian"
] |
7b26113bd184e50a751252119b04932ea046cf8f358d28fb30b1b9c1ec177f6d
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import numpy as np
from psi4 import core
from psi4.driver import p4util
from psi4.driver.p4util.exceptions import ValidationError
from psi4.driver import qcdb
# CONVENTIONS:
# n_ at the start of a variable name is short for "number of."
# _pi at the end of a variable name is short for "per irrep."
# h is the index of an irrep.
array_format = {"precision": 10}
def _displace_cart(mol, geom, salc_list, i_m, step_size):
"""Displace a geometry along the specified displacement SALCs.
Parameters
----------
mol : qcdb.molecule or :py:class:`~psi4.core.Molecule`
The molecule to displace
geom : ndarray
(nat, 3) reference geometry [a0] of the molecule (const).
salc_list : :py:class:`~psi4.core.CdSalcList`
A list of Cartesian displacement SALCs
i_m : iterator of tuples
An iterator containing tuples. Each tuple has the index of a salc in
salc_list and the number of steps (positive or negative) to displace
the salc at that index.
step_size : float
The size of a single "step," i.e., the stencil size.
Returns
    -------
label : str
Displacement label for the metadata dictionary.
"""
label = ""
# This for loop and tuple unpacking is why the function can handle
# an arbitrary number of SALCs.
for salc_index, disp_steps in i_m:
    # * Indexing is used because iterating over `salc_list` directly raises a Python error.
for i in range(len(salc_list[salc_index])):
component = salc_list[salc_index][i]
geom[component.atom, component.xyz] += (
disp_steps * step_size * component.coef / np.sqrt(mol.mass(component.atom)))
# salc_index is in descending order. We want the label in ascending order, so...
# ...add the new label part from the left of the string, not the right.
label = "{:d}: {:d}".format(salc_index, disp_steps) + (", " if label else "") + label
return label
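def _demo_displace_cart_arithmetic():
    """Illustrative sketch (added, not part of Psi4): the per-component
    arithmetic of _displace_cart in plain numpy, with hypothetical numbers."""
    geom = np.zeros((2, 3))            # two atoms at the origin
    mass = np.array([1.008, 15.999])   # H, O
    coef, atom, xyz = 0.7, 1, 2        # one SALC component: 0.7 * z of atom 1
    disp_steps, step_size = -1, 0.005
    geom[atom, xyz] += disp_steps * step_size * coef / np.sqrt(mass[atom])
    label = "{:d}: {:d}".format(0, disp_steps)   # "salc index: steps"
    return geom, label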
def _initialize_findif(mol, freq_irrep_only, mode, initialize_string, verbose=0):
"""Perform initialization tasks needed by all primary functions.
Parameters
----------
mol : qcdb.molecule or :py:class:`~psi4.core.Molecule`
The molecule to displace
freq_irrep_only : int
The Cotton ordered irrep to get frequencies for. Choose -1 for all
irreps.
mode : {"1_0", "2_0", "2_1"}
The first number specifies the derivative level determined from
displacements, and the second number is the level determined at.
initialize_string : function
A function that returns the string to print to show the caller was entered.
The string is both caller-specific and dependent on values determined
in this function.
verbose : int
Set to 0 to silence extra print information, regardless of the print level.
Used so the information is printed only during geometry generation, and not
during the derivative computation as well.
Returns
-------
data : dict
Miscellaneous information required by callers.
"""
core.print_out("\n ----------------------------------------------------------\n")
core.print_out(" FINDIF\n")
core.print_out(" R. A. King and Jonathon Misiewicz\n")
core.print_out(" ---------------------------------------------------------\n\n")
print_lvl = core.get_option("FINDIF", "PRINT")
num_pts = core.get_option("FINDIF", "POINTS")
disp_size = core.get_option("FINDIF", "DISP_SIZE")
data = {"print_lvl": print_lvl, "num_pts": num_pts, "disp_size": disp_size}
if print_lvl:
core.print_out(initialize_string(data))
# Get settings for CdSalcList, then get the CdSalcList.
method_allowed_irreps = 0x1 if mode == "1_0" else 0xFF
t_project = not core.get_global_option("EXTERN") and (not core.get_global_option("PERTURB_H"))
    # core.get_option returns an int, but CdSalcList expects a bool, so re-cast
r_project = t_project and bool(core.get_option("FINDIF", "FD_PROJECT"))
salc_list = core.CdSalcList(mol, method_allowed_irreps, t_project, r_project)
n_atom = mol.natom()
n_irrep = salc_list.nirrep()
n_salc = salc_list.ncd()
if print_lvl and verbose:
core.print_out(f" Number of atoms is {n_atom}.\n")
if method_allowed_irreps != 0x1:
core.print_out(f" Number of irreps is {n_irrep}.\n")
core.print_out(" Number of {!s}SALCs is {:d}.\n".format(
"" if method_allowed_irreps != 0x1 else "symmetric ", n_salc))
core.print_out(" Translations projected? {:d}. Rotations projected? {:d}.\n".format(t_project, r_project))
# TODO: Replace with a generator from a stencil to a set of points.
    # Diagonal displacements differ for the totally symmetric irrep compared to all others.
# Off-diagonal displacements are the same for both.
pts_dict = {
3: {
"sym_irr": ((-1, ), (1, )),
"asym_irr": ((-1, ), ),
"off": ((1, 1), (-1, -1))
},
5: {
"sym_irr": ((-2, ), (-1, ), (1, ), (2, )),
"asym_irr": ((-2, ), (-1, )),
"off": ((-1, -2), (-2, -1), (-1, -1), (1, -1), (-1, 1), (1, 1), (2, 1), (1, 2))
}
}
if num_pts not in pts_dict:
raise ValidationError("FINDIF: Invalid number of points!")
# Convention: x_pi means x_per_irrep. The ith element is x for irrep i, with Cotton ordering.
salc_indices_pi = [[] for h in range(n_irrep)]
# Validate that we have an irrep matching the user-specified irrep, if any.
try:
salc_indices_pi[freq_irrep_only]
except (TypeError, IndexError):
if freq_irrep_only != -1:
raise ValidationError("FINDIF: Irrep value not in valid range.")
# Populate salc_indices_pi for all irreps.
    # * Indexing is used because iterating over `salc_list` directly raises a Python error.
for i in range(len(salc_list)):
salc_indices_pi[salc_list[i].irrep_index()].append(i)
# If the method allows more than one irrep, print how the irreps partition the SALCS.
if print_lvl and method_allowed_irreps != 0x1 and verbose:
core.print_out(" Index of SALCs per irrep:\n")
for h in range(n_irrep):
if print_lvl > 1 or freq_irrep_only in {h, -1}:
tmp = (" {:d} " * len(salc_indices_pi[h])).format(*salc_indices_pi[h])
core.print_out(" {:d} : ".format(h + 1) + tmp + "\n")
core.print_out(" Number of SALCs per irrep:\n")
for h in range(n_irrep):
if print_lvl > 1 or freq_irrep_only in {h, -1}:
core.print_out(" Irrep {:d}: {:d}\n".format(h + 1, len(salc_indices_pi[h])))
# Now that we've printed the SALCs, clear any that are not of user-specified symmetry.
if freq_irrep_only != -1:
for h in range(n_irrep):
if h != freq_irrep_only:
salc_indices_pi[h].clear()
n_disp_pi = []
disps = pts_dict[num_pts] # We previously validated num_pts in pts_dict.
for irrep, indices in enumerate(salc_indices_pi):
n_disp = len(indices) * len(disps["asym_irr" if irrep != 0 else "sym_irr"])
if mode == "2_0":
# Either len(indices) or len(indices)-1 is even, so dividing by two is safe.
n_disp += len(indices) * (len(indices) - 1) // 2 * len(disps["off"])
n_disp_pi.append(n_disp)
# Let's print out the number of geometries, the displacement multiplicity, and the CdSALCs!
if print_lvl and verbose:
core.print_out(" Number of geometries (including reference) is {:d}.\n".format(sum(n_disp_pi) + 1))
if method_allowed_irreps != 0x1:
core.print_out(" Number of displacements per irrep:\n")
for i, ndisp in enumerate(n_disp_pi, start=1):
core.print_out(f" Irrep {i}: {ndisp}\n")
if print_lvl > 1 and verbose:
for i in range(len(salc_list)):
salc_list[i].print_out()
data.update({
"n_disp_pi": n_disp_pi,
"n_irrep": n_irrep,
"n_salc": n_salc,
"n_atom": n_atom,
"salc_list": salc_list,
"salc_indices_pi": salc_indices_pi,
"disps": disps,
"project_translations": t_project,
"project_rotations": r_project
})
return data
def _geom_generator(mol, freq_irrep_only, mode):
"""
Generate geometries for the specified molecule and derivative levels.
You probably want to instead use one of the convenience functions:
gradient_from_energies_geometries, hessian_from_energies_geometries,
hessian_from_gradients_geometries.
Parameters
----------
mol : qcdb.molecule or :py:class:`~psi4.core.Molecule`
The molecule on which to perform a finite difference calculation.
freq_irrep_only : int
The Cotton ordered irrep to get frequencies for. Choose -1 for all
irreps.
mode : {"1_0", "2_0", "2_1"}
The first number specifies the targeted derivative level. The
second number is the compute derivative level. E.g., "2_0"
is hessian from energies.
Returns
-------
findifrec : dict
Dictionary of finite difference data, specified below.
The dictionary makes findifrec _extensible_. If you need a new field
in the record, just add it.
All fields should be present at all times, with two exceptions:
1. Fields for computed quantities will not be available until
after they are computed.
2. Displacement specific overrides for globals will not be
available unless the user specified the overrides.
(Such overrides are not implemented at time of writing. An example
is giving a displacement its own step dict.)
step : dict
A descriptor for the finite difference step.
        In future, this can be overridden by step fields for individual displacements.
        units : {'bohr'}
The units for the displacement. The code currently assumes "bohr," per MolSSI standards.
size : float
The step size for the displacement.
stencil_size : {3, 5}
Number of points to evaluate at for each displacement basis vector. Count
includes the central reference point.
displacement_space : {'CdSalc'}
A string specifying the vector space in which displacements are performed.
Currently, only CdSalc is supported.
project_translations : bool
Whether translations are to be projected out of the displacements.
project_rotations : bool
Whether rotations are to be projected out of the displacements.
molecule : dict
The reference molecule, in MolSSI schema. See
https://molssi-qc-schema.readthedocs.io/en/latest/auto_topology.html
displacements : dict
A dictionary mapping labels specifying the displacement to data about
the geometry. Labels are of the form "A: a, B: b" where A and B index the
basis vector in displacement space and A < B, and a and b index the step
magnitude. For instance, "0: 1, 1: -1" specifies displacing +1 in
displacement vector 0 and -1 in displacement vector 1. "1: -1, 0: 1" is
forbidden for breaking ordering. Generalizes to arbitrary numbers of
simultaneous displacements in the obvious way.
The possible geometry data is as follows:
geometry: list of floats
(3 * nat) The molecular geometry as a flat list in bohr. All coordinates
are given for one atom before proceeding to the next atom.
        energy: float
The last computed electronic energy at the geometry.
gradient: list of floats
(3 * nat) The last computed gradient of energy with respect to changes in
geometry at the geometry, as a flat list. All coordinates are given for
displacing one atom before proceeding to the next atom.
reference : dict
A geometry data dict, as described above, for the reference geometry.
"""
msg_dict = {
"1_0":
"energies to determine gradients",
"2_1":
"gradients to determine vibrational frequencies and \n"
" normal modes. Resulting frequencies are only valid at stationary points",
"2_0":
"gradients to determine vibrational frequencies and \n"
" normal modes. Resulting frequencies are only valid at stationary points"
}
try:
print_msg = msg_dict[mode]
except KeyError:
raise ValidationError("FINDIF: Mode {} not recognized.".format(mode))
def init_string(data):
return (" Using finite-differences of {:s}.\n"
" Generating geometries for use with {:d}-point formula.\n"
" Displacement size will be {:6.2e}.\n".format(print_msg, data["num_pts"], data["disp_size"]))
# Genuine support for qcdb molecules would be nice. But that requires qcdb CdSalc tech.
# Until then, silently swap the qcdb molecule out for a psi4.core.molecule.
if isinstance(mol, qcdb.Molecule):
mol = core.Molecule.from_dict(mol.to_dict())
data = _initialize_findif(mol, freq_irrep_only, mode, init_string, 1)
# We can finally start generating displacements.
ref_geom = mol.geometry().clone()
# Now we generate the metadata...
findifrec = {
"step": {
"units": "bohr",
"size": data["disp_size"]
},
"stencil_size": data["num_pts"],
"displacement_space": "CdSALC",
"project_translations": data["project_translations"],
"project_rotations": data["project_rotations"],
"molecule": mol.to_schema(dtype=1, units='Bohr'),
"displacements": {},
"reference": {}
}
def append_geoms(indices, steps):
"""Given a list of indices and a list of steps to displace each, append the corresponding geometry to the list."""
new_geom = ref_geom.clone().np
# Next, to make this salc/magnitude composite.
index_steps = zip(indices, steps)
label = _displace_cart(mol, new_geom, data["salc_list"], index_steps, data["disp_size"])
if data["print_lvl"] > 2:
core.print_out("\nDisplacement '{}'\n{}\n".format(label, np.array_str(new_geom, **array_format)))
findifrec["displacements"][label] = {"geometry": new_geom.ravel().tolist()}
for h in range(data["n_irrep"]):
active_indices = data["salc_indices_pi"][h]
for index in active_indices:
# Displace along the diagonal.
# Remember that the totally symmetric irrep has special displacements.
for val in data["disps"]["sym_irr" if h == 0 else "asym_irr"]:
append_geoms((index, ), val)
# Hessian from energies? We have off-diagonal displacements to worry about.
if mode == "2_0":
# i indexes SALC indices of the current irrep.
for i, index in enumerate(active_indices):
for index2 in active_indices[:i]:
for val in data["disps"]["off"]:
append_geoms((index, index2), val)
if data["print_lvl"] > 2:
core.print_out("\nReference\n{}\n".format(np.array_str(ref_geom.np, **array_format)))
findifrec["reference"]["geometry"] = ref_geom.np.ravel().tolist()
if data["print_lvl"] > 1:
core.print_out("\n-------------------------------------------------------------\n")
return findifrec
def assemble_gradient_from_energies(findifrec):
"""Compute the gradient by finite difference of energies.
Parameters
----------
findifrec : dict
Dictionary of finite difference data, specified in _geom_generator docstring.
Returns
-------
gradient : ndarray
(nat, 3) Cartesian gradient [Eh/a0].
"""
# This *must* be a Psi molecule at present - CdSalcList generation panics otherwise
mol = core.Molecule.from_schema(findifrec["molecule"], nonphysical=True, verbose=0)
def init_string(data):
return (" Computing gradient from energies.\n"
" Using {:d}-point formula.\n"
" Energy without displacement: {:15.10f}\n"
" Check energies below for precision!\n"
" Forces are for mass-weighted, symmetry-adapted cartesians (in au).\n".format(
findifrec["stencil_size"], findifrec["reference"]["energy"]))
data = _initialize_findif(mol, -1, "1_0", init_string)
salc_indices = data["salc_indices_pi"][0]
    # Extract the energies, and turn them into an ndarray for easy manipulation
# E(i, j) := Energy on displacing the ith SALC we care about in the jth step
# Steps are ordered, for example, -2, -1, 1, 2
max_disp = (findifrec["stencil_size"] - 1) // 2 # The numerator had better be divisible by two.
e_per_salc = 2 * max_disp
E = np.zeros((len(salc_indices), e_per_salc))
for i, salc_index in enumerate(salc_indices):
for j in range(1, max_disp + 1):
E[i, max_disp - j] = findifrec["displacements"][f"{salc_index}: {-j}"]["energy"]
E[i, max_disp + j - 1] = findifrec["displacements"][f"{salc_index}: {j}"]["energy"]
# Perform the finite difference.
if findifrec["stencil_size"] == 3:
g_q = (E[:, 1] - E[:, 0]) / (2.0 * findifrec["step"]["size"])
elif findifrec["stencil_size"] == 5:
g_q = (E[:, 0] - 8.0 * E[:, 1] + 8.0 * E[:, 2] - E[:, 3]) / (12.0 * findifrec["step"]["size"])
else: # This error SHOULD have already been caught, but just in case...
raise ValidationError("FINDIF: {} is an invalid number of points.".format(findifrec["stencil_size"]))
g_q = np.asarray(g_q)
if data["print_lvl"]:
energy_string = ""
for i in range(1, max_disp + 1):
energy_string = f"Energy(-{i}) " + energy_string + f"Energy(+{i}) "
core.print_out("\n Coord " + energy_string + " Force\n")
for salc in range(data["n_salc"]):
print_str = " {:5d}" + " {:17.10f}" * (e_per_salc) + " {force:17.10f}" + "\n"
energies = E[salc]
core.print_out(print_str.format(salc, force=g_q[salc], *energies))
core.print_out("\n")
# Transform the gradient from mass-weighted SALCs to non-mass-weighted Cartesians
B = data["salc_list"].matrix()
g_cart = np.dot(g_q, B)
g_cart = g_cart.reshape(data["n_atom"], 3)
massweighter = np.array([mol.mass(a) for a in range(data["n_atom"])])**(0.5)
g_cart = (g_cart.T * massweighter).T
if data["print_lvl"]:
core.print_out("\n-------------------------------------------------------------\n")
return g_cart
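def _demo_central_difference_gradient():
    """Illustrative sketch (added, not part of Psi4): the 3- and 5-point
    first-derivative stencils used above, on the toy function sin(x)."""
    h, x0, f = 0.01, 0.3, np.sin
    g3 = (f(x0 + h) - f(x0 - h)) / (2.0 * h)
    g5 = (f(x0 - 2*h) - 8.0*f(x0 - h) + 8.0*f(x0 + h) - f(x0 + 2*h)) / (12.0 * h)
    assert abs(g3 - np.cos(x0)) < 1e-4   # O(h**2) error
    assert abs(g5 - np.cos(x0)) < 1e-8   # O(h**4) error
    return g3, g5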
def _process_hessian_symmetry_block(H_block, B_block, massweighter, irrep, print_lvl):
"""Perform post-construction processing for a symmetry block of the Hessian.
Statements need to be printed, and the Hessian must be made orthogonal.
Parameters
    ----------
H_block : ndarray
A block of the Hessian for an irrep, in mass-weighted salcs.
Dimensions # cdsalcs by # cdsalcs.
B_block : ndarray
A block of the B matrix for an irrep, which transforms CdSalcs to Cartesians.
Dimensions # cdsalcs by # cartesians.
massweighter : ndarray
The mass associated with each atomic coordinate.
Dimension # cartesians. Due to x, y, z, values appear in groups of three.
irrep : str
A string identifying the irrep H_block and B_block are of.
print_lvl : int
The level of printing information requested by the user.
Returns
-------
H_block : ndarray
H_block, but made into an orthogonal array.
"""
# Symmetrize our Hessian block.
# The symmetric structure is lost due to errors in the computation
H_block = (H_block + H_block.T) / 2.0
if print_lvl >= 3:
core.print_out("\n Force Constants for irrep {} in mass-weighted, ".format(irrep))
core.print_out("symmetry-adapted cartesian coordinates.\n")
core.print_out("\n{}\n".format(np.array_str(H_block, **array_format)))
evals, evects = np.linalg.eigh(H_block)
# Get our eigenvalues and eigenvectors in descending order.
idx = evals.argsort()[::-1]
evals = evals[idx]
evects = evects[:, idx]
normal_irr = np.dot((B_block * massweighter).T, evects)
if print_lvl >= 2:
core.print_out("\n Normal coordinates (non-mass-weighted) for irrep {}:\n".format(irrep))
core.print_out("\n{}\n".format(np.array_str(normal_irr, **array_format)))
return H_block
def _process_hessian(H_blocks, B_blocks, massweighter, print_lvl):
"""Perform post-construction processing for the Hessian.
Statements need to be printed, and the Hessian must be transformed.
Parameters
----------
H_blocks : list of ndarray
A list of blocks of the Hessian per irrep, in mass-weighted salcs.
Each is dimension # cdsalcs-in-irrep by # cdsalcs-in-irrep.
B_blocks : list of ndarray
A block of the B matrix per irrep, which transforms CdSalcs to Cartesians.
Each is dimensions # cdsalcs-in-irrep by # cartesians.
massweighter : ndarray
The mass associated with each atomic coordinate.
Dimension 3 * natom. Due to x, y, z, values appear in groups of three.
print_lvl : int
The level of printing information requested by the user.
Returns
-------
Hx : ndarray
The Hessian in non-mass weighted cartesians.
"""
# We have the Hessian in each irrep! The final task is to perform coordinate transforms.
H = p4util.block_diagonal_array(*H_blocks)
B = np.vstack(B_blocks)
if print_lvl >= 3:
core.print_out("\n Force constant matrix for all computed irreps in mass-weighted SALCS.\n")
core.print_out("\n{}\n".format(np.array_str(H, **array_format)))
# Transform the massweighted Hessian from the CdSalc basis to Cartesians.
# The Hessian is the matrix not of a linear transformation, but of a (symmetric) bilinear form
# As such, the change of basis is formula A' = Xt A X, no inverses!
# More conceptually, it's A'_kl = A_ij X_ik X_jl; Each index transforms linearly.
Hx = np.dot(np.dot(B.T, H), B)
if print_lvl >= 3:
core.print_out("\n Force constants in mass-weighted Cartesian coordinates.\n")
core.print_out("\n{}\n".format(np.array_str(Hx, **array_format)))
# Un-massweight the Hessian.
Hx = np.transpose(Hx / massweighter) / massweighter
if print_lvl >= 3:
core.print_out("\n Force constants in Cartesian coordinates.\n")
core.print_out("\n{}\n".format(np.array_str(Hx, **array_format)))
if print_lvl:
core.print_out("\n-------------------------------------------------------------\n")
return Hx
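def _demo_bilinear_change_of_basis():
    """Illustrative sketch (added, not part of Psi4): a Hessian transforms as
    a bilinear form, Hx = B.T @ H @ B, exactly as in _process_hessian above."""
    rng = np.random.default_rng(2)
    B = rng.standard_normal((2, 3))              # 2 SALCs x 3 cartesians
    H = np.array([[2.0, 0.3], [0.3, 1.0]])       # symmetric SALC-basis block
    Hx = B.T @ H @ B
    assert np.allclose(Hx, Hx.T)                 # congruence keeps symmetry
    return Hx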
def assemble_hessian_from_gradients(findifrec, freq_irrep_only):
"""Compute the Hessian by finite difference of gradients.
Parameters
----------
findifrec : dict
Dictionary of finite difference data, specified in _geom_generator docstring.
freq_irrep_only : int
The Cotton ordered irrep to get frequencies for. Choose -1 for all
irreps.
Returns
-------
hessian : ndarray
(3 * nat, 3 * nat) Cartesian Hessian [Eh/a0^2]
"""
# This *must* be a Psi molecule at present - CdSalcList generation panics otherwise
mol = core.Molecule.from_schema(findifrec["molecule"], nonphysical=True, verbose=0)
displacements = findifrec["displacements"]
def init_string(data):
return (" Computing second-derivative from gradients using projected, \n"
" symmetry-adapted, cartesian coordinates.\n\n"
" {:d} gradients passed in, including the reference geometry.\n".format(len(displacements) + 1))
data = _initialize_findif(mol, freq_irrep_only, "2_1", init_string)
# For non-totally symmetric CdSALCs, a symmetry operation can convert + and - displacements.
    # Good News: By taking advantage of that, we (potentially) ran fewer computations.
# Bad News: We need to find the - displacements from the + computations now.
# The next ~80 lines of code are dedicated to that task.
if data["print_lvl"]:
core.print_out(" Generating complete list of displacements from unique ones.\n\n")
pg = mol.point_group()
ct = pg.char_table()
order = pg.order()
# Determine what atoms map to what other atoms under the point group operations.
# The py-side compute_atom_map will work whether mol is a Py-side or C-side object.
atom_map = qcdb.compute_atom_map(mol)
if data["print_lvl"] >= 3:
core.print_out(" The atom map:\n")
for atom, sym_image_list in enumerate(atom_map):
core.print_out(f" {atom + 1:d} : ")
for image_atom in sym_image_list:
core.print_out(f"{image_atom + 1:4d}")
core.print_out("\n")
core.print_out("\n")
# A list of lists of gradients, per irrep
gradients_pi = [[]]
# Extract and print the symmetric gradients. These need no additional processing.
max_disp = (findifrec["stencil_size"] - 1) // 2 # The numerator had better be divisible by two.
for i in data["salc_indices_pi"][0]:
for n in range(-max_disp, 0):
grad_raw = displacements[f"{i}: {n}"]["gradient"]
gradients_pi[0].append(np.reshape(grad_raw, (-1, 3)))
for n in range(1, max_disp + 1):
grad_raw = displacements[f"{i}: {n}"]["gradient"]
gradients_pi[0].append(np.reshape(grad_raw, (-1, 3)))
if data["print_lvl"] >= 3:
core.print_out(" Symmetric gradients\n")
for gradient in gradients_pi[0]:
core.print_out("\n{}\n".format(np.array_str(gradient, **array_format)))
    # Asymmetric gradients. There's always SOME operation that transforms a positive
    # displacement into a negative one. By applying it here, we can recover the
    # gradients at the positive displacements.
for h in range(1, data["n_irrep"]):
# If there are no CdSALCs in this irrep, let's skip it.
if not data["n_disp_pi"][h]:
gradients_pi.append([])
continue
gamma = ct.gamma(h)
if data["print_lvl"] >= 3:
core.print_out(f"Characters for irrep {h}\n")
for group_op in range(order):
core.print_out(" {:5.1f}".format(gamma.character(group_op)))
core.print_out("\n")
# Find the group operation that converts + to - displacements.
for group_op in range(order):
if gamma.character(group_op) == -1:
break
else:
raise ValidationError("A symmetric gradient passed for a non-symmetric one.")
if data["print_lvl"]:
core.print_out(" Operation {} takes plus displacements of irrep {} to minus ones.\n".format(
group_op + 1, gamma.symbol()))
sym_op = np.array(ct.symm_operation(group_op).matrix())
gradients = []
def recursive_gradients(i, n):
"""Populate gradients, with step -n, -n+1, ... -1, 1, ... n. Positive displacements are computed."""
grad_raw = displacements[f"{i}: {-n}"]["gradient"]
gradients.append(np.reshape(grad_raw, (-1, 3)))
new_grad = np.zeros((data["n_atom"], 3))
for atom, image in enumerate(atom_map):
atom2 = image[group_op]
new_grad[atom2] = np.einsum("xy,y->x", sym_op, gradients[-1][atom])
if n > 1:
recursive_gradients(i, n - 1)
gradients.append(new_grad)
for i in data["salc_indices_pi"][h]:
recursive_gradients(i, max_disp)
gradients_pi.append(gradients)
# Massweight all gradients.
# Remember, the atom currently corresponds to our 0 axis, hence these transpose tricks.
massweighter = np.asarray([mol.mass(a) for a in range(data["n_atom"])])**(-0.5)
gradients_pi = [[(grad.T * massweighter).T for grad in gradients] for gradients in gradients_pi]
if data["print_lvl"] >= 3:
core.print_out(" All mass-weighted gradients\n")
for gradients in gradients_pi:
for grad in gradients:
core.print_out("\n{}\n".format(np.array_str(grad, **array_format)))
# We have all our gradients generated now!
# Next, time to get our Hessian.
H_pi = []
B_pi = []
irrep_lbls = mol.irrep_labels()
massweighter = np.repeat(massweighter, 3)
for h in range(data["n_irrep"]):
n_disp = data["n_disp_pi"][h]
Nindices = len(data["salc_indices_pi"][h])
gradients = gradients_pi[h]
if not Nindices:
continue
# Flatten each gradient, and turn it into a COLUMN of the matrix.
gradient_matrix = np.array([grad.flatten() for grad in gradients]).T
# Transform disps from Cartesian to CdSalc coordinates.
# For future convenience, we transpose.
# Rows are gradients and columns are coordinates with respect to a particular CdSALC.
B_pi.append(data["salc_list"].matrix_irrep(h))
grads_adapted = np.dot(B_pi[-1], gradient_matrix).T
if data["print_lvl"] >= 3:
core.print_out("Gradients in B-matrix coordinates\n")
for disp in range(n_disp):
core.print_out(f" disp {disp}: ")
for salc in grads_adapted[disp]:
core.print_out(f"{salc:15.10f}")
core.print_out("\n")
H_pi.append(np.empty([Nindices, Nindices]))
if findifrec["stencil_size"] == 3:
H_pi[-1] = (grads_adapted[1::2] - grads_adapted[::2]) / (2.0 * findifrec["step"]["size"])
elif findifrec["stencil_size"] == 5:
H_pi[-1] = (grads_adapted[::4] - 8 * grads_adapted[1::4] + 8 * grads_adapted[2::4] -
grads_adapted[3::4]) / (12.0 * findifrec["step"]["size"])
H_pi[-1] = _process_hessian_symmetry_block(H_pi[-1], B_pi[-1], massweighter, irrep_lbls[h], data["print_lvl"])
# All blocks of the Hessian are now constructed!
return _process_hessian(H_pi, B_pi, massweighter, data["print_lvl"])
def assemble_hessian_from_energies(findifrec, freq_irrep_only):
"""Compute the Hessian by finite difference of energies.
Parameters
----------
findifrec : dict
Dictionary of finite difference data, specified in _geom_generator docstring.
freq_irrep_only : int
The 0-indexed Cotton ordered irrep to get frequencies for. Choose -1 for all irreps.
Returns
-------
hessian : ndarray
(3 * nat, 3 * nat) Cartesian Hessian [Eh/a0^2].
"""
# This *must* be a Psi molecule at present - CdSalcList generation panics otherwise
mol = core.Molecule.from_schema(findifrec["molecule"], nonphysical=True, verbose=0)
displacements = findifrec["displacements"]
ref_energy = findifrec["reference"]["energy"]
def init_string(data):
max_label_len = str(max([len(label) for label in displacements]))
out_str = ""
for label, disp_data in displacements.items():
out_str += (" {:" + max_label_len + "s} : {:20.10f}\n").format(label, disp_data["energy"])
return (" Computing second-derivative from energies using projected, \n"
" symmetry-adapted, cartesian coordinates.\n\n"
" {:d} energies passed in, including the reference geometry.\n"
" Using {:d}-point formula.\n"
" Energy without displacement: {:15.10f}\n"
" Check energies below for precision!\n{}".format(
len(displacements) + 1, findifrec["stencil_size"], ref_energy, out_str))
data = _initialize_findif(mol, freq_irrep_only, "2_0", init_string)
massweighter = np.repeat([mol.mass(a) for a in range(data["n_atom"])], 3)**(-0.5)
B_pi = []
H_pi = []
irrep_lbls = mol.irrep_labels()
max_disp = (findifrec["stencil_size"] - 1) // 2
e_per_diag = 2 * max_disp
# Unlike in the gradient case, we have no symmetry transformations to worry about.
# We get to the task directly: assembling the force constants in each irrep block.
for h in range(data["n_irrep"]):
salc_indices = data["salc_indices_pi"][h]
if not salc_indices: continue
n_salcs = len(salc_indices)
E = np.zeros((len(salc_indices), e_per_diag))
# Step One: Diagonals
# For asymmetric irreps, the energy at a + disp is the same as at a - disp
# Just reuse the - disp energy for the + disp energy
for i, salc_index in enumerate(salc_indices):
for j in range(1, max_disp + 1):
E[i, max_disp - j] = displacements[f"{salc_index}: {-j}"]["energy"]
k = -j if h else j # Because of the +- displacement trick
E[i, max_disp + j - 1] = displacements[f"{salc_index}: {k}"]["energy"]
# Now determine all diagonal force constants for this irrep.
if findifrec["stencil_size"] == 3:
diag_fcs = E[:, 0] + E[:, 1]
diag_fcs -= 2 * ref_energy
diag_fcs /= (findifrec["step"]["size"]**2)
elif findifrec["stencil_size"] == 5:
diag_fcs = -E[:, 0] + 16 * E[:, 1] + 16 * E[:, 2] - E[:, 3]
diag_fcs -= 30 * ref_energy
diag_fcs /= (12 * findifrec["step"]["size"]**2)
H_irr = np.diag(diag_fcs)
# TODO: It's a bit ugly to use the salc indices to grab the off-diagonals but the indices
# within the irrep to grab the diagonals. Is there a better way to do this?
# Step Two: Off-diagonals
# We need off-diagonal energies, diagonal energies, AND the reference energy
# Grabbing off-diagonal energies is a pain, so once we know our SALCs...
# ...define offdiag_en to do that for us.
for i, salc in enumerate(salc_indices):
for j, salc2 in enumerate(salc_indices[:i]):
offdiag_en = lambda index: displacements["{l}: {}, {k}: {}".format(k=salc, l=salc2, *data["disps"]["off"][index])]["energy"]
if findifrec["stencil_size"] == 3:
fc = (+offdiag_en(0) + offdiag_en(1) + 2 * ref_energy - E[i][0] - E[i][1] - E[j][0] - E[j][1]) / (
2 * findifrec["step"]["size"]**2)
elif findifrec["stencil_size"] == 5:
fc = (-offdiag_en(0) - offdiag_en(1) + 9 * offdiag_en(2) - offdiag_en(3) - offdiag_en(4) +
9 * offdiag_en(5) - offdiag_en(6) - offdiag_en(7) + E[i][0] - 7 * E[i][1] - 7 * E[i][2] +
E[i][3] + E[j][0] - 7 * E[j][1] - 7 * E[j][2] + E[j][3] + 12 * ref_energy) / (
12 * findifrec["step"]["size"]**2)
H_irr[i, j] = fc
H_irr[j, i] = fc
B_pi.append(data["salc_list"].matrix_irrep(h))
H_pi.append(_process_hessian_symmetry_block(H_irr, B_pi[-1], massweighter, irrep_lbls[h], data["print_lvl"]))
# All blocks of the Hessian are now constructed!
return _process_hessian(H_pi, B_pi, massweighter, data["print_lvl"])
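def _demo_central_difference_hessian():
    """Illustrative sketch (added, not part of Psi4): the diagonal
    second-derivative stencils used above, on the toy function cos(x)."""
    h, x0, f = 0.01, 0.2, np.cos
    ref = f(x0)
    fc3 = (f(x0 - h) + f(x0 + h) - 2.0 * ref) / h**2
    fc5 = (-f(x0 - 2*h) + 16.0*f(x0 - h) + 16.0*f(x0 + h) - f(x0 + 2*h)
           - 30.0 * ref) / (12.0 * h**2)
    assert abs(fc3 + np.cos(x0)) < 1e-4   # exact value is -cos(x0)
    assert abs(fc5 + np.cos(x0)) < 1e-8
    return fc3, fc5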
def gradient_from_energies_geometries(molecule):
"""
Generate geometries for a gradient by finite difference of energies.
Parameters
----------
molecule : qcdb.molecule or :py:class:`~psi4.core.Molecule`
The molecule to compute the gradient of.
Returns
-------
findifrec : dict
Dictionary of finite difference data, specified in _geom_generator docstring.
Notes
-----
Only symmetric displacements are necessary, so user specification of
symmetry is disabled.
"""
return _geom_generator(molecule, -1, "1_0")
def hessian_from_gradients_geometries(molecule, irrep):
"""
    Generate geometries for a hessian by finite difference of gradients.
Parameters
----------
molecule : qcdb.molecule or :py:class:`~psi4.core.Molecule`
The molecule to compute the frequencies of.
irrep : int
The Cotton ordered irrep to get frequencies for. Choose -1 for all
irreps.
Returns
-------
findifrec : dict
Dictionary of finite difference data, specified in _geom_generator docstring.
"""
return _geom_generator(molecule, irrep, "2_1")
def hessian_from_energies_geometries(molecule, irrep):
"""
Generate geometries for a hessian by finite difference of energies.
Parameters
----------
molecule : qcdb.molecule or :py:class:`~psi4.core.Molecule`
The molecule to compute the frequencies of.
irrep : int
The Cotton ordered irrep to get frequencies for. Choose -1 for all
irreps.
Returns
-------
findifrec : dict
Dictionary of finite difference data, specified in _geom_generator docstring.
"""
return _geom_generator(molecule, irrep, "2_0")
|
lothian/psi4
|
psi4/driver/driver_findif.py
|
Python
|
lgpl-3.0
| 38,143
|
[
"Psi4"
] |
1f4ac51b3d2a8f7b2f7071f21ffdcc48dafbfada83719a188704bcd14d0d18b7
|
#!/usr/bin/env python
import unittest
from MooseDocs.testing import MarkdownTestCase
class TestMarkdownExtensions(MarkdownTestCase):
"""
Tests that the 'moosedocs.yml' configuration file sets up the markdown conversion, including
that the basic (i.e., non-MOOSE) extensions are working.
"""
def testConvert(self):
md = "Testing"
html = self.parser.convert(md)
self.assertEqual(html, '<p>Testing</p>')
def testToc(self):
md = "[TOC]\n# Section One\n\n#Section Two"
self.assertConvert('test_toc.html', md)
def testAbbreviations(self):
md = "The HTML specification\n" \
"is maintained by the W3C.\n\n" \
"*[HTML]: Hyper Text Markup Language\n" \
"*[W3C]: World Wide Web Consortium"
self.assertConvert('test_abbreviations.html', md)
def testAttributeLists(self):
md = "This is a paragraph.\n" \
"{: #an_id .a_class }\n\n" \
"A setext style header {: #setext}\n" \
"=================================\n\n" \
"### A hash style header ### {: #hash }\n\n" \
'[link](http://example.com){: class="foo bar" title="Some title!" }'
self.assertConvert('test_attributelists.html', md)
def testDefinitionLists(self):
md = "Apple\n" \
": Pomaceous fruit of plants of the genus Malus in\n" \
" the family Rosaceae.\n\n" \
"Orange\n" \
": The fruit of an evergreen tree of the genus Citrus."
self.assertConvert('test_definitionlists.html', md)
def testFencedCodeBlocks(self):
md = "~~~~{.python}\n" \
"# python code\n" \
"~~~~\n\n" \
"~~~~.html\n" \
"<p>HTML Document</p>\n" \
"~~~~"
self.assertConvert('test_fencedcodeblocks.html', md)
@unittest.skip("^ in markdown causes trouble")
def testFootnotes(self):
md = r"Footnotes[^1] have a label[^@#$%] and the footnote's content.\n\n" \
r"[^1]: This is a footnote content.\n" \
r'[^@#$%]: A footnote on the label: "@#$%".'
self.assertConvert('test_footnotes.html', md)
def testTables(self):
md = "First Header | Second Header\n" \
"------------- | -------------\n" \
"Content Cell | Content Cell\n" \
"Content Cell | Content Cell"
self.assertConvert('test_tables.html', md)
def testSmartStrong(self):
md = '__this__works__too__.'
html = self.parser.convert(md)
self.assertEqual(html, u'<p><strong>this__works__too</strong>.</p>')
def testAdmonition(self):
md = "!!! note\n" \
" You should note that the title will be automatically capitalized."
self.assertConvert('test_admonition.html', md)
def testSmarty(self):
html = self.parser.convert("'foo'")
self.assertEqual(u'<p>‘foo’</p>', html)
html = self.parser.convert('"foo"')
self.assertEqual(u'<p>“foo”</p>', html)
html = self.parser.convert('...')
self.assertEqual(u'<p>…</p>', html)
html = self.parser.convert('--')
self.assertEqual(u'<p>–</p>', html)
if __name__ == '__main__':
unittest.main(module=__name__, verbosity=2)
|
backmari/moose
|
docs/tests/markdown_extensions/test_markdown_extensions.py
|
Python
|
lgpl-2.1
| 3,380
|
[
"MOOSE"
] |
24f8904bca44bbb0b7063648c51c41847d95b9159d03d6b396dd22d268286ca4
|
import numpy as np
from scipy.linalg import cholesky, qr, lstsq
"""
The Q_discrete_white_noise function is taken from filterpy by Roger R Labbe Jr. (MIT Licensed)
"""
def Q_discrete_white_noise(dim, dt=1., var=1.):
""" Returns the Q matrix for the Discrete Constant White Noise
    Model. dim may be either 2 or 3, dt is the time step, and var is the
    variance in the noise.
Q is computed as the G * G^T * variance, where G is the process noise per
time step. In other words, G = [[.5dt^2][dt]]^T for the constant velocity
model.
Parameters
-----------
dim : int (2 or 3)
dimension for Q, where the final dimension is (dim x dim)
dt : float, default=1.0
time step in whatever units your filter is using for time. i.e. the
amount of time between innovations
var : float, default=1.0
variance in the noise
"""
assert dim == 2 or dim == 3
if dim == 2:
Q = np.array([[.25*dt**4, .5*dt**3],
[ .5*dt**3, dt**2]], dtype=float)
else:
Q = np.array([[.25*dt**4, .5*dt**3, .5*dt**2],
[ .5*dt**3, dt**2, dt],
[ .5*dt**2, dt, 1]], dtype=float)
return Q * var
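def _demo_q_discrete_white_noise(dt=0.1, var=2.0):
    # Illustrative check (added): Q above equals G @ G.T * var for the
    # constant-velocity model, with G = [0.5*dt**2, dt]^T as the docstring states.
    G = np.array([[0.5 * dt**2], [dt]])
    assert np.allclose(Q_discrete_white_noise(2, dt=dt, var=var), G @ G.T * var)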
def cholupdate(R, x, sign):
    """Rank-1 update ('+') or downdate ('-') of an upper-triangular Cholesky factor.
    After R1 = cholupdate(R, x, '+'), R1.T @ R1 equals R.T @ R + outer(x, x).
    Adapted from http://stackoverflow.com/questions/8636518/dense-cholesky-update-in-python
    Note: R and x are modified in place.
    """
    p = np.size(x)
    x = x.T
for k in range(p):
if sign == '+':
r = np.sqrt(R[k,k]**2 + x[k]**2)
elif sign == '-':
r = np.sqrt(R[k,k]**2 - x[k]**2)
c = r/R[k,k]
s = x[k]/R[k,k]
R[k,k] = r
if sign == '+':
R[k,k+1:p] = (R[k,k+1:p] + s*x[k+1:p])/c
elif sign == '-':
R[k,k+1:p] = (R[k,k+1:p] - s*x[k+1:p])/c
x[k+1:p]= c*x[k+1:p] - s*R[k, k+1:p]
return R
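def _demo_cholupdate():
    # Illustrative check (added): after cholupdate(R, x, '+') the upper
    # triangular factor satisfies R1.T @ R1 == A + outer(x, x).
    rng = np.random.default_rng(3)
    x = rng.standard_normal(3)
    A = 4.0 * np.eye(3)
    R1 = cholupdate(cholesky(A), x.copy(), '+')   # scipy cholesky returns upper
    assert np.allclose(R1.T @ R1, A + np.outer(x, x))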
def sigmapoint_function(state, S):
# TODO: Fill this in
pass
def process_function(state, Q, dt):
# TODO: Add first column of Q to output
output = np.nan * np.ones((state.size,))
# Position and linear updates
output[(st_pos_x, st_pos_y, st_pos_z),] = state[(st_pos_x, st_pos_y, st_pos_z),] \
+ dt * state[(st_lvel_x, st_lvel_y, st_lvel_z),] \
+ dt * dt * state[(st_lacc_x, st_lacc_y, st_lacc_z),] / 2
output[(st_lvel_x, st_lvel_y, st_lvel_z),] = state[(st_lvel_x, st_lvel_y, st_lvel_z),] \
+ dt * state[(st_lacc_x, st_lacc_y, st_lacc_z),]
output[(st_lacc_x, st_lacc_y, st_lacc_z),] = state[(st_lacc_x, st_lacc_y, st_lacc_z),]
# Orientation update
delta_q = quat_from_axang(state[(st_avel_x, st_avel_y, st_avel_z),], dt)
q_out = quaternion_multiply(state[(st_e_w, st_e_x, st_e_y, st_e_z),], delta_q) # Maybe to do: *qnoise in the middle
output[(st_e_w, st_e_x, st_e_y, st_e_z),] = q_out / np.linalg.norm(q_out)
output[(st_avel_x, st_avel_y, st_avel_z),] = state[(st_avel_x, st_avel_y, st_avel_z),]
return output
def mean_function(sigmas, weights):
# TODO: Only for ax-angle
# Calculate average state x from sigma points.
x = np.nan * np.ones(sigmas.shape[1])
# For position, velocity, acceleration, and angular velocity this is simple
x[st_lin_list,] = np.dot(weights, sigmas[:, st_lin_list])
# For orientation, we need the weighted average quaternion.
# http://stackoverflow.com/questions/12374087/average-of-multiple-quaternions
Q = sigmas[:, (st_e_w, st_e_x, st_e_y, st_e_z)].T * weights
eig_val, eig_vec = np.linalg.eig(np.dot(Q, Q.T))
q_out = eig_vec[:, np.argmax(eig_val)]
q_out /= np.linalg.norm(q_out)
x[(st_e_w, st_e_x, st_e_y, st_e_z),] = q_out
return x
def residualx_function(sigma, state):
# TODO: Fill this in
pass
def observation_function(state, R):
# TODO: add first row of R to output
output = np.nan * np.ones((R.shape[0],))
q = state[(st_e_w, st_e_x, st_e_y, st_e_z),]
# world_to_cntrl = quaternion_matrix(quaternion_conjugate(q))
accel_world_g = np.hstack((0, state[(st_lacc_x, st_lacc_y, st_lacc_z),] + grav_world)) / GRAVITY
output[(obs_accel_x, obs_accel_y, obs_accel_z),] = quaternion_multiply(q, quaternion_multiply(accel_world_g,
quaternion_conjugate(
q)))[1:]
# + accel_bias + accel_noise
output[(obs_gyro_x, obs_gyro_y, obs_gyro_z),] = state[
(st_avel_x, st_avel_y, st_avel_z),] # + gyro_bias + gyro_noise
output[(obs_mag_x, obs_mag_y, obs_mag_z),] = quaternion_multiply(q, quaternion_multiply(mag_world,
quaternion_conjugate(
q)))[1:]
# + mag_bias + mag_noise
output[(obs_track_x, obs_track_y, obs_track_z),] = state[(st_pos_x, st_pos_y, st_pos_z),]
return output
def innovation_function(real_obs, pred_obs):
#Not used by me
return np.subtract(pred_obs, real_obs)
def addx_fn(state1, state2):
#TODO: Handle angle-axis
return state1 + state2
class Noise(object):
def __init__(self, type='gaussian', cov_type='sqrt', dim=0, mu=0, cov=None, adapt_method=None, adapt_params=None):
self.cov_type = cov_type
self.dim = dim
self.mu = mu
cov = cov if cov is not None else np.eye(self.dim)  # 'is not None': arrays have no single truth value
if self.cov_type == 'sqrt':
self.cov = cholesky(cov)
else:
self.cov = cov
self.adapt_method = adapt_method
self.adapt_params = adapt_params
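# Usage sketch (hedged): a 3-D zero-mean noise source held in square-root
# form, i.e. .cov stores chol(C) rather than C itself (upper vs. lower
# triangle follows whichever cholesky is imported at the top of this file):
#   q_noise = Noise(dim=3, cov=0.01 * np.eye(3))   # q_noise.cov ~ 0.1 * np.eye(3)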
class SRUKF(object):
def __init__(self, init_x=None, init_S=None, init_Q=None, init_R=Noise(dim=1),
alpha=0.1, beta=2.0, kappa=None,
special_state_inds=None, sigmapoint_fn=None, process_fn=None, mean_fn=None, residualx_fn=None):
self.x = init_x if init_x is not None else np.zeros(1)  # 'is not None' avoids array truth-value errors; a 1-element zero state is a hedged default (the x setter calls val.size, which a plain float lacks)
self.S = init_S if init_S is not None else Noise(dim=self._xdim)
self.Q = init_Q if init_Q is not None else Noise(dim=self._xdim)
self.R = init_R
self.alpha = alpha
self.beta = beta
self.kappa = kappa if kappa is not None else (3 - self._xdim)  # 'is not None': kappa == 0 is a legitimate value
self.reset_weights()
self.sigmapoint_fn = sigmapoint_fn
self.special_xinds = np.arange(self._xdim) if (special_state_inds is None and self.sigmapoint_fn is not None) else special_state_inds
self.process_fn = process_fn
self.mean_fn = mean_fn
self.residualx_fn = residualx_fn
# Hedged fix: update() references observation_fn and addx_fn but they were
# never assigned anywhere; default to the module-level implementations above.
self.observation_fn = observation_function
self.addx_fn = addx_fn
@property
def x(self):
return self._x
@x.setter
def x(self, val):
self._xdim = val.size if val is not None else 0
self._x = val
def reset_weights(self):
L = self._xdim + self.Q.dim + self.R.dim
nsp = 2 * L + 1 # Number of sigma points
kappa = self.alpha**2 * (L + self.kappa) - L
W = np.asarray([kappa, 0.5, 0]) / (L + kappa)
W[2] = W[0] + (1 - self.alpha**2) + self.beta
self.W2isPos = W[2] >= 0
self.W = W
sqrtW = self.W.copy()  # copy: taking abs in place below must not mutate self.W
sqrtW[2] = np.abs(sqrtW[2])
self.sqrtW = np.sqrt(sqrtW)
self.sqrt_L_plus_kappa = np.sqrt(L + kappa)
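# Sanity check (hedged): the mean weights sum to one, W[0] + 2*L*W[1] == 1,
# since W[0] = lam/(L + lam) and W[1] = 0.5/(L + lam), where lam is the
# scaled-UKF parameter computed above (stored, confusingly, in the local
# variable `kappa`).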
def predict_step(self, dt, save=False):
# Create augmented state variable
x_a = np.concatenate((self.x, self.Q.mu, self.R.mu))
# Create augmented covariance
L = self.S.dim + self.Q.dim + self.R.dim
S_a = np.zeros((L, L))
S_a[0:self.S.dim, 0:self.S.dim] = self.S.cov
S_a[self.S.dim:self.S.dim+self.Q.dim, self.S.dim:self.S.dim+self.Q.dim] = self.Q.cov
S_a[L-self.R.dim:, L-self.R.dim:] = self.R.cov
S_a *= self.sqrt_L_plus_kappa
# Calculate sigma points
nsp = 2 * L + 1
X_a = np.tile(x_a.reshape(-1, 1), (1, nsp))  # column-replicate the augmented state
# Default sigma point calculation: columns 1..L get +S_a, columns L+1..2L get -S_a
X_a[:, 1:L + 1] += S_a
X_a[:, L + 1:] -= S_a
# Special sigma point calculation
if self.sigmapoint_fn is not None:
X_a[self.special_xinds, :] = self.sigmapoint_fn(x_a[self.special_xinds], S_a[self.special_xinds, :])
# Propagate sigmapoints through process function. It should know how to handle special_inds
X_k = np.nan * np.ones((self._xdim, X_a.shape[1]))
for sig_ix in range(X_a.shape[1]):
X_k[:, sig_ix] = self.process_fn(X_a[:self._xdim, sig_ix], X_a[self._xdim:self._xdim + self.Q.dim, sig_ix], dt)
# Predicted state = weighted sum of propagated sigma points
# Default mean
x_k = self.W[0] * X_k[:, 0] + self.W[1] * np.sum(X_k[:, 1:], axis=1)
if self.mean_fn is not None:
# Special mean
x_k[self.special_xinds] = self.mean_fn(X_k[self.special_xinds, :], self.W)
# Sigma residuals, used in process noise
# Default residuals
X_k_residuals = np.subtract(X_k, x_k)
if self.residualx_fn is not None:
# Special residuals
for sig_ix in range(X_k.shape[1]):
X_k_residuals[self.special_xinds, sig_ix] = self.residualx_fn(X_k[self.special_xinds, sig_ix], x_k[self.special_xinds])
# process noise = qr update of weighted X_k_residuals
[_, S_k] = qr((self.sqrtW[1] * X_k_residuals[:, 1:]).T, mode='economic') # Upper
S_k = cholupdate(S_k, self.sqrtW[2] * X_k_residuals[:, 0], '+' if self.W2isPos else '-') # Upper
if save:
self.X_a = X_a # Augmented sigma points
self.x_k = x_k # Predicted state
self.X_k = X_k # Predicted sigma points
self.X_k_residuals = X_k_residuals
self.S_k = S_k # Process noise
return x_k, S_k.T
def update(self, observation):
L = self.S.dim + self.Q.dim + self.R.dim
sigma_R = self.X_a[L-self.R.dim:, :]
# Predict observation sigma points
# TODO: Handle incomplete observation; only predict necessary variables
Y_k = np.nan*np.ones((self.R.dim, self.X_k.shape[1]))
for sig_ix in range(self.X_k.shape[1]):
Y_k[:, sig_ix] = self.observation_fn(self.X_k[:, sig_ix], sigma_R[:, sig_ix])
# Predicted observation = weighted sum of observation sigma points
y_k = self.W[0]*Y_k[:, 0] + self.W[1]*np.sum(Y_k[:, 1:], axis=1)
# TODO: custom observation mean if any variables are not addable
# Observation residuals
Y_k_residuals = np.subtract(Y_k, y_k)
# Observation covariance
[_, S_y] = qr((self.sqrtW[1] * Y_k_residuals[:, 1:]).T, mode='economic') # Upper
S_y = cholupdate(S_y, self.sqrtW[2] * Y_k_residuals[:, 0], '+' if self.W2isPos else '-') # Upper
S_y = S_y.T # Need lower from upper
# State-Observation covariance
P_xy = self.W[2] * np.outer(self.X_k_residuals[:, 0], Y_k_residuals[:, 0]) \
+ self.W[1] * np.dot(self.X_k_residuals[:, 1:], Y_k_residuals[:, 1:].T)
# Kalman gain
K = lstsq(S_y, P_xy.T)[0]
K = lstsq(S_y.T, K)[0]
K = K.T
# Calculate innovation
innovation = np.subtract(observation, y_k)
# TODO: custom innovation if any observation variables are not addable
# Update state estimate
upd = np.dot(K, innovation)
self.x = self.x_k + upd
if self.addx_fn is not None:
self.x[self.special_xinds] = self.addx_fn(self.x_k, upd[self.special_xinds])
# Update state covariance
cov_update_vectors = np.dot(K, S_y)  # Correct covariance. This is equivalent to: Px = Px_ - KG * Py * KG';
for j in range(self.R.dim):
self.S_k = cholupdate(self.S_k, cov_update_vectors[:, j], '-')
self.S.cov = self.S_k.T  # keep S as a Noise object so .dim stays available on the next predict_step
if self.S.adapt_method:
#TODO: self.S.cov = adapted_S
pass
|
zelmon64/PSMoveService
|
misc/python/pypsmove/srukf.py
|
Python
|
apache-2.0
| 11,894
|
[
"Gaussian"
] |
761c95d288e5bd20142f4544d1ba72999f341282cddf390781dbdc0174d00e90
|
# Orca
#
# Copyright (C) 2013-2014 Igalia, S.L.
#
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2013-2014 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
import orca.orca as orca
import orca.orca_state as orca_state
import orca.scripts.default as default
import orca.speech as speech
from .script_utilities import Utilities
class Script(default.Script):
def __init__(self, app):
default.Script.__init__(self, app)
def getUtilities(self):
return Utilities(self)
def onActiveDescendantChanged(self, event):
"""Callback for object:active-descendant-changed accessibility events."""
role = event.source.getRole()
try:
focusedRole = orca_state.locusOfFocus.getRole()
except:
pass
else:
# This is very likely typeahead search and not a real focus change.
tableRoles = [pyatspi.ROLE_TABLE, pyatspi.ROLE_TREE_TABLE]
if focusedRole == pyatspi.ROLE_TEXT and role in tableRoles:
orca.setLocusOfFocus(event, event.source, False)
default.Script.onActiveDescendantChanged(self, event)
def onCheckedChanged(self, event):
"""Callback for object:state-changed:checked accessibility events."""
obj = event.source
if self.utilities.isSameObject(obj, orca_state.locusOfFocus):
default.Script.onCheckedChanged(self, event)
return
# Present changes of child widgets of GtkListBox items
isListBox = lambda x: x and x.getRole() == pyatspi.ROLE_LIST_BOX
if not pyatspi.findAncestor(obj, isListBox):
return
self.updateBraille(obj)
speech.speak(self.speechGenerator.generateSpeech(obj, alreadyFocused=True))
def onNameChanged(self, event):
"""Callback for object:property-change:accessible-name events."""
role = event.source.getRole()
try:
focusRole = orca_state.locusOfFocus.getRole()
except:
focusRole = None
if role == pyatspi.ROLE_FRAME and focusRole == pyatspi.ROLE_TABLE_CELL:
return
default.Script.onNameChanged(self, event)
def onFocus(self, event):
"""Callback for focus: accessibility events."""
# NOTE: This event type is deprecated and Orca should no longer use it.
# This callback remains just to handle bugs in applications and toolkits
# during the remainder of the unstable (3.11) development cycle.
role = event.source.getRole()
# https://bugzilla.gnome.org/show_bug.cgi?id=711397
if role == pyatspi.ROLE_COMBO_BOX:
orca.setLocusOfFocus(event, event.source)
return
# The above issue also seems to happen with spin buttons.
if role == pyatspi.ROLE_SPIN_BUTTON:
orca.setLocusOfFocus(event, event.source)
return
# https://bugzilla.gnome.org/show_bug.cgi?id=720987
if role == pyatspi.ROLE_TABLE_COLUMN_HEADER:
orca.setLocusOfFocus(event, event.source)
return
# https://bugzilla.gnome.org/show_bug.cgi?id=720989
if role == pyatspi.ROLE_MENU == event.source.parent.getRole():
orca.setLocusOfFocus(event, event.source)
return
# Unfiled, but a similar case of the above issue with combo boxes.
# Seems to happen for checkboxes too. This is why we can't have
# nice things.
if role in [pyatspi.ROLE_PUSH_BUTTON, pyatspi.ROLE_CHECK_BOX]:
orca.setLocusOfFocus(event, event.source)
return
# Unfiled. Happens in Evolution, but for what seems to be a generic
# Gtk+ toggle button. So we'll handle it here.
if role == pyatspi.ROLE_TOGGLE_BUTTON:
orca.setLocusOfFocus(event, event.source)
return
# Unfiled. But this happens when you are in Gedit, get into a menu
# and then press Escape. The text widget emits a focus: event, but
# not a state-changed:focused event.
#
# A similar issue can be seen when a text widget starts out having
# focus, such as in the old gnome-screensaver dialog.
if role in [pyatspi.ROLE_TEXT, pyatspi.ROLE_PASSWORD_TEXT]:
orca.setLocusOfFocus(event, event.source)
return
# Unfiled. When a context menu first appears and an item is already
# selected, we get a focus: event for that menu item, but there is
# not a state-changed event for that item, nor a selection-changed
# event for the menu.
if role == pyatspi.ROLE_MENU_ITEM:
orca.setLocusOfFocus(event, event.source)
return
# Unfiled. When a canvas item gets focus but is not selected, we
# are only getting a focus event. This happens in Nautilus.
if role == pyatspi.ROLE_CANVAS:
orca.setLocusOfFocus(event, event.source)
return
# Unfiled, but yet another case of only getting a focus: event when
# a widget appears in a parent container and is already focused.
# An example of this particular case is the list of elements dialogs.
if role == pyatspi.ROLE_TABLE:
obj = event.source
selectedChildren = self.utilities.selectedChildren(obj)
if selectedChildren:
obj = selectedChildren[0]
orca.setLocusOfFocus(event, obj)
return
def onShowingChanged(self, event):
"""Callback for object:state-changed:showing accessibility events."""
obj = event.source
if not self.utilities._isNonModalPopOver(obj):
default.Script.onShowingChanged(self, event)
return
if event.detail1:
speech.speak(self.speechGenerator.generateSpeech(obj))
labels = self.utilities.unrelatedLabels(obj)
msg = ' '.join(map(self.utilities.displayedText, labels))
self.presentMessage(msg)
def onTextSelectionChanged(self, event):
"""Callback for object:text-selection-changed accessibility events."""
obj = event.source
if not self.utilities.isSameObject(obj, orca_state.locusOfFocus):
return
default.Script.onTextSelectionChanged(self, event)
|
ruibarreira/linuxtrail
|
usr/lib/python3/dist-packages/orca/scripts/toolkits/gtk/script.py
|
Python
|
gpl-3.0
| 7,150
|
[
"ORCA"
] |
8df44fd3ba1fee434d58347990f3282c9890b2ea4949717645357bde2bd4e40a
|
"""MPF plugin for sounds. Includes SoundController, Channel, Sound, Track, and
StreamTrack parent classes."""
# sound.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
import time
import Queue
import uuid
import copy
import sys
from mpf.system.assets import Asset, AssetManager
from mpf.system.utility_functions import Util
global import_success
try:
import pygame
import pygame.locals
import_success = True
except:
import_success = False
def preload_check(machine):
if import_success:
return True
else:
return False
class SoundController(object):
"""Parent class for the sound controller which is responsible for all audio,
sounds, and music in the machine. There is only one of these per machine.
Args:
machine: The main machine controller object.
"""
def __init__(self, machine):
self.log = logging.getLogger('SoundController')
self.machine = machine
if 'sound_system' not in self.machine.config:
self.config = dict()
return # todo move to preload_check()
self.log.debug("Loading the Sound Controller")
self.machine.sound = self
self.config = self.machine.config['sound_system']
self.tracks = dict() # k = track name, v = track obj
self.stream_track = None
self.pygame_channels = list()
self.sound_events = dict()
self.volume = 1.0
if 'volume_steps' not in self.config:
self.config['volume_steps'] = 20
if 'initial_volume' in self.config:
self.volume = self.config['initial_volume']
self.set_volume(volume=self.volume)
self.machine.request_pygame()
# Pass the pygame pre-initialization audio settings in
# 0 is the 'auto' setting for all of these
if 'buffer' not in self.config or self.config['buffer'] == 'auto':
self.config['buffer'] = 0
if 'bits' not in self.config or self.config['bits'] == 'auto':
self.config['bits'] = 0
if 'frequency' not in self.config or self.config['frequency'] == 'auto':
self.config['frequency'] = 0
if 'channels' not in self.config:
self.config['channels'] = 1
pygame.mixer.pre_init(frequency=self.config['frequency'],
size=self.config['bits'],
channels=self.config['channels'],
buffer=self.config['buffer']
)
# Note pygame docs says pre_init() kwarg should be 'buffersize', but
# it's actually 'buffer'.
self.log.debug("Configuring Pygame Mixer. Frequency: %s, Size: %s, "
"channels: %s, buffer: %s", self.config['frequency'],
self.config['bits'], self.config['channels'],
self.config['buffer'])
# Register events
self.machine.events.add_handler('action_set_volume', self.set_volume)
self.machine.events.add_handler('pygame_initialized', self._initialize)
if 'sound_player' in self.machine.config:
self.machine.events.add_handler('init_phase_5',
self.register_sound_events,
config=self.machine.config['sound_player'])
self.machine.mode_controller.register_start_method(
self.register_sound_events, 'sound_player')
def _initialize(self):
# Initialize the sound controller. Not done in __init__() because we
# need Pygame to be setup first.
try:
frequency, bits, channels = pygame.mixer.get_init()
except TypeError:
self.log.error("Could not initialize audio. Does your computer "
"have an audio device? Maybe it doesn't create one"
"if there are no speakers plugged in?")
sys.exit()
self.log.debug("Pygame Sound Mixer configuration. Freq: %s, Bits: %s, "
"Channels: %s", frequency, bits, channels)
# Configure Pygame to use the correct number of channels. We need one
# for each simultaneous sound we want to play.
num_channels = 0 # How many total
if 'tracks' in self.config:
for item in self.config['tracks'].values():
if 'simultaneous_sounds' in item:
num_channels += item['simultaneous_sounds']
else:
num_channels += 1
if not num_channels:
num_channels = 1
pygame.mixer.set_num_channels(num_channels)
# Configure Tracks
if 'tracks' in self.config:
for k, v in self.config['tracks'].iteritems():
self.create_track(name=k, config=v)
else:
self.create_track(name='default')
# Configure streaming track
if 'stream' in self.config:
if 'name' not in self.config['stream']:
self.config['stream']['name'] = 'music'
self.stream_track = StreamTrack(self.machine, self.config)
# Create the sound AssetManager
AssetManager(
machine=self.machine,
config_section=config_section,
path_string=(self.machine.config['media_controller']['paths'][path_string]),
asset_class=asset_class,
asset_attribute=asset_attribute,
file_extensions=file_extensions)
def create_track(self, name, config=None):
""" Creates a new MPF track add registers in the central track list.
Args:
name: String name of this track used for identifying where sounds
are played.
config: Config dictionary for this track.
Note: "Tracks" in MPF are like channels.. you might have a "music"
track, a "voice" track, a "sound effects" track, etc.
"""
self.tracks[name] = Track(self.machine, name, self.pygame_channels,
config)
def register_sound_events(self, config, mode=None, priority=0):
# config is the sound_player subsection of the config dict
self.log.debug("Processing sound_player configuration. Base Priority: "
"%s", priority)
self.log.debug("config: %s", config)
key_list = list()
for entry_name in config:
if 'block' not in config[entry_name]:
config[entry_name]['block'] = False
block = config[entry_name].pop('block')
key_list.append(self.register_sound_event(config=config[entry_name],
priority=priority,
block=block))
return self.unregister_sound_events, key_list
def unregister_sound_events(self, key_list):
self.log.debug("Unloading sound_player events")
for key in key_list:
self.unregister_sound_event(key)
def register_sound_event(self, config, priority=0, block=False):
"""Sets up game sounds from the config file.
Args:
config: Python dictionary which contains the game sounds settings.
"""
self.log.debug("Registering sound events from config: %s", config)
if 'sound' not in config:
return False
elif type(config['sound']) is str:
config['sound'] = self.machine.sounds[config['sound']]
# this is kind of weird because once the sound has been registered, the
# sound will still be converted from the string to the object. This is
# an unintended side effect of passing around a dict, but I guess it's
# ok? We just have to check to make sure we have a string before we
# try to convert it to an object. If not, the conversion has already
# been done.
if 'start_events' not in config:
config['start_events'] = list()
else:
config['start_events'] = Util.string_to_list(
config['start_events'])
if 'stop_events' not in config:
config['stop_events'] = list()
else:
config['stop_events'] = Util.string_to_list(
config['stop_events'])
if 'duration' not in config or config['duration'] is None:
config['duration'] = None
if 'loops' not in config or config['loops'] is None:
config['loops'] = 0
if 'priority' not in config or config['priority'] is None:
config['priority'] = 0
if 'fade_in' not in config or config['fade_in'] is None:
config['fade_in'] = 0
if 'fade_out' not in config or config['fade_out'] is None:
config['fade_out'] = 0
if 'channel' not in config or config['channel'] is None:
config['channel'] = 'auto'
if 'volume' not in config or config['volume'] is None:
config['volume'] = 1
elif config['volume'] > 2:
config['volume'] = 2
config['key'] = uuid.uuid4()
#config['event_keys'] = set()
for event in config['start_events']:
self.log.debug("Checking config for event '%s'", event)
settings = copy.copy(config)
settings.pop('start_events')
settings.pop('stop_events')
if event not in self.sound_events:
self.sound_events[event] = list()
self.log.debug("Adding '%s' to sound_events list", event)
self.machine.events.add_handler(event,
self._sound_event_callback,
event_name=event)
kwargs = dict() # temp
sound_event_entry = dict()
sound_event_entry['settings'] = settings
sound_event_entry['kwargs'] = kwargs
sound_event_entry['priority'] = priority
sound_event_entry['block'] = block
sound_event_entry['type'] = 'start'
self.log.debug("Registering Sound for Event: %s. Settings: %s",
event, settings)
self.sound_events[event].append(sound_event_entry)
for event in config['stop_events']:
settings = copy.copy(config)
settings.pop('start_events')
settings.pop('stop_events')
if event not in self.sound_events:
self.sound_events[event] = list()
self.machine.events.add_handler(event,
self._sound_event_callback,
event_name=event)
kwargs = dict() # temp
sound_event_entry = dict()
sound_event_entry['settings'] = settings
sound_event_entry['kwargs'] = kwargs
sound_event_entry['priority'] = priority
sound_event_entry['block'] = block
sound_event_entry['type'] = 'stop'
self.log.debug("Registering Sound for Event: %s. Settings: %s",
event, settings)
self.sound_events[event].append(sound_event_entry)
# todo sort by priority
return config['key']
def unregister_sound_event(self, key):
self.log.debug("Unregistering sound events")
for event in self.sound_events.keys():
for entry in self.sound_events[event][:]:
if entry['settings']['key'] == key:
self.log.debug("Remvoing %s from event %s", entry, event)
self.sound_events[event].remove(entry)
if not self.sound_events[event]:
self.machine.events.remove_handler_by_event(event,
self._sound_event_callback)
del self.sound_events[event]
def _sound_event_callback(self, event_name, **kwargs):
# Loop through all the sound events for this event
if event_name not in self.sound_events:
self.log.critical("got sound callback but did not find event?")
raise Exception()
sound_list = self.sound_events[event_name]
self.log.debug("Sound event callback. Sound list: %s", sound_list)
for sound in sound_list:
self.log.debug("Checking sound: %s", sound)
sound_obj = sound['settings']['sound']
kwargs = sound['settings']
if sound['type'] == 'start':
self.log.debug("Playing sound")
sound_obj.play(**kwargs)
elif sound['type'] == 'stop':
self.log.debug("Stopping sound")
sound_obj.stop(**kwargs)
def set_volume(self, volume=None, change=None, **kwargs):
"""Sets the overall volume of the sound system.
Args:
volume: The new volume level, a floating point value between 0.0
and 1.0. 1.0 is full volume. 0.0 is mute.
change: A positive or negative value between 0.0 and 1.0 of a
change in volume that will be made.
kwargs: Not used here. Included because this method is often
called from events which might contain additional kwargs.
Note that the volume can never be increased above 1.0. This sound
volume level only affects MPF. You might also have to adjust the
overall system volume in the OS.
"""
old_volume = self.volume
if volume:
self.volume = float(volume)
elif change:
self.volume += float(change)
if self.volume > 1.0:
self.volume = 1.0
elif self.volume < 0:
self.volume = 0.0
display_volume = int(self.volume * self.config['volume_steps'])
if display_volume == self.config['volume_steps']:
display_volume = "MAX"
elif display_volume:
display_volume = str(display_volume)
else:
display_volume = "OFF" # todo move to config
# todo change volume of currently playing sounds
for channel in self.pygame_channels:
if channel.pygame_channel.get_busy():
playing_sound = channel.pygame_channel.get_sound()
new_volume = (1.0 *
self.volume *
channel.current_sound.config['volume'] *
channel.parent_track.volume)
playing_sound.set_volume(new_volume)
if self.stream_track and pygame.mixer.music.get_busy():
new_volume = (1.0 *
self.volume *
self.stream_track.volume *
self.stream_track.current_sound.config['volume'])
pygame.mixer.music.set_volume(new_volume)
self.machine.events.post('volume_change', volume=self.volume,
change=old_volume-self.volume,
display_volume=display_volume)
def get_volume(self):
return self.volume
class Track(object):
"""Parent class for an MPF track. Each sound track in MPF can be made up
of one or more Pygame sound channels to support multiple simultaneous
sounds.
Args:
machine: The main machine controller object.
name: A string of the name this channel will be referred to, such as
"voice" or "sfx."
global_channel_list: A python list which keeps track of the global
Pygame channels in use.
config: A python dictionary containing the configuration settings for
this track.
"""
def __init__(self, machine, name, global_channel_list, config):
self.log = logging.getLogger('Track.' + name)
self.log.debug("Creating Track with config: %s", config)
self.name = name
self.config = config
self.pygame_channels = list()
self.volume = 1
self.queue = Queue.PriorityQueue()
if 'simultaneous_sounds' not in self.config:
self.config['simultaneous_sounds'] = 1
if 'preload' not in self.config:
self.config['preload'] = False
if 'volume' in self.config:
self.volume = self.config['volume']
for track in range(self.config['simultaneous_sounds']):
self.create_channel(machine, global_channel_list)
machine.events.add_handler('timer_tick', self._tick)
def __repr__(self):
return '<Track.{}>'.format(self.name)
def create_channel(self, machine, global_channel_list):
"""Factory method which creates a Pygame sound channel to be used with
this track.
Args:
machine: The main machine object.
global_channel_list: A list which contains the global list of
Pygame channels in use by MPF.
"""
next_channel_num = len(global_channel_list)
this_channel_object = Channel(machine, self, next_channel_num)
global_channel_list.append(this_channel_object)
self.pygame_channels.append(this_channel_object)
def play(self, sound, priority, **settings):
"""Plays a sound on this track.
Args:
sound: The MPF sound object you want to play.
priority: The relative priority of this sound.
**settings: One or more additional settings for this playback.
This method will automatically find an available Pygame channel to use.
If this new sound has a higher priority than the lowest playing sound,
it will interrupt that sound to play. Otherwise it will be added to the
queue to be played when a channel becomes available.
"""
self.log.debug("Received request to play sound %s. Priority %s, "
"settings: %s", sound, priority, settings)
# Make sure we have a sound object. If not we assume the sound is being
# loaded (is that dumb?) and we add it to the queue so it will be
# picked up on the next loop.
if not sound.sound_object:
self.log.debug("Sound is not loaded. Queueing...")
self.queue_sound(sound, priority, **settings)
return
# We have a sound object. Do we have an available channel?
found_available_channel = False
# todo check to see if this sound is already playing and what our
# settings are for that.
for channel in self.pygame_channels: # todo change to generator
if channel.current_sound_priority == -1:
found_available_channel = True
self.log.debug("Found an available channel: %s", channel)
channel.play(sound, priority=priority, **settings)
break
# No available channels. What do we do with this sound now? Options:
# 1. If the priority of the lowest currently-playing sound is lower than
# ours, kill that sound and replace it with the new one.
# 2. Add this to the queue, arranged by priority
if not found_available_channel:
self.log.debug("Did not find an available channel")
lowest_channel = min(self.pygame_channels)
if lowest_channel.current_sound_priority < priority:
self.log.debug("New sound is higher priority than the current "
"lowest priority sound. Pre-empting")
lowest_channel.play(sound, priority=priority, **settings)
else:
if sound.expiration_time:
exp_time = time.time() + sound.expiration_time
else:
exp_time = None
self.queue_sound(sound, priority=priority, exp_time=exp_time,
**settings)
def stop(self, sound):
try:
sound.sound_object.stop()
except AttributeError:
pass
def queue_sound(self, sound, priority, exp_time=None, **settings):
"""Adds a sound to the queue to be played when a Pygame channel becomes
free.
Args:
sound: The MPF sound object.
priority: The priority of this sound.
exp_time: Real world time of when this sound will expire. (It will
not play if the queue is freed up after it expires.)
**settings: Additional settings for this sound's playback.
Note that this method will insert this sound into a position in the
queue based on its priority, so highest-priority sounds are played
first.
"""
# Note the negative operator in front of priority since this queue
# retrieves the lowest values first, and MPF uses higher values for
# higher priorities.
self.log.debug("Queueing sound %s, priority: %s, exp_time: %s",
sound, priority, exp_time)
self.queue.put([-priority, sound, exp_time, settings])
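# Example (hedged): queueing sounds with priorities 1 and 5 stores entries
# [-1, ...] and [-5, ...]; PriorityQueue.get() returns the smallest value
# first, so the priority-5 sound comes out ahead of the priority-1 sound.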
def get_sound(self):
"""Returns the next sound from the queue to be played.
Returns: A tuple of the sound object, the priority, and dictionary of
additional settings for that sound. If the queue is empty, returns
None.
This method will ensure that the sound returned has not expired. If the
next sound in the queue is expired, it removes it and returns the next
one.
"""
try:
next_sound = self.queue.get_nowait()
except Queue.Empty:
return
if not next_sound[2] or next_sound[2] >= time.time():
# Undo the negation applied in queue_sound() before handing back the priority
return next_sound[1], -next_sound[0], next_sound[3]
else:
return self.get_sound()  # todo recursion here is still not ideal; a loop would be safer
def _tick(self):
if not self.queue.empty():
sound, priority, settings = self.get_sound()
self.play(sound, priority=priority, **settings)
class StreamTrack(object):
"""Parent class for MPF's "Stream" track which corresponds to Pygame's
music channel.
Args:
machine: The main machine object.
config: Python dictionary containing the configuration settings for
this track.
Sounds played on this track are streamed from disk rather than loaded into
memory. This is good for background music since those files can be large
and there's only one playing at a time.
"""
def __init__(self, machine, config):
self.log = logging.getLogger('Streaming Channel')
self.log.debug("Creating Stream Track with config: %s", config)
self.machine_sound = machine.sound
self.config = config
self.name = 'music'
self.volume = 1
self.current_sound = None
if 'name' in self.config:
self.name = self.config['name']
if 'volume' in self.config:
self.volume = self.config['volume']
self.config['preload'] = False
def __repr__(self):
return '<StreamTrack.{}>'.format(self.name)
def play(self, sound, **settings):
"""Plays a sound on this track.
Args:
sound: The MPF sound object to play.
**settings: Additional settings for this sound's playback.
This stream track only supports playing one sound at a time, so if
you call this when a sound is currently playing, the new sound will
stop the current sound.
"""
self.current_sound = sound
pygame.mixer.music.load(sound.file_name)
volume = (1.0 *
self.volume *
sound.config['volume'] *
self.machine_sound.volume)
if 'volume' in settings:
volume *= settings['volume']
pygame.mixer.music.set_volume(volume)
self.log.debug("Playing Sound: %s Vol: %s", sound,
pygame.mixer.music.get_volume())
if 'loops' not in settings:
settings['loops'] = 1
pygame.mixer.music.play(settings['loops'])
def stop(self, sound=None):
"""Stops the playing sound and resets the current position to the
beginning.
"""
pygame.mixer.music.stop()
# todo add support for fade out
def pause(self):
"""Pauses the current sound and remembers the current position so
playback can be resumed from the same point via the unpause() method.
"""
pygame.mixer.music.pause()
# todo add support for fade out
def unpause(self):
"""Resumes playing of a previously-paused sound. If the sound was not
paused, it starts playing it from the beginning.
"""
pygame.mixer.music.unpause()
# todo add support for fade in
def fadeout(self, ms):
"""Fades the sound out.
Args:
ms: The number of milliseconds to fade out the sound.
"""
pygame.mixer.music.fadeout(ms)
# todo add support for MPF time duration strings
class Channel(object):
"""Parent class that holds a Pygame sound channel. One or more of these are
tied to an MPF Track.
Args:
machine: The main machine object.
parent_track: The MPF track object this channel belongs to.
channel_number: Integer number that is used to identify this channel.
"""
def __init__(self, machine, parent_track, channel_number):
self.log = logging.getLogger('Sound Channel {}'.format(channel_number))
self.channel_number = channel_number
self.machine_sound = machine.sound
self.current_sound_priority = -1
self.current_sound = None
self.pygame_channel = pygame.mixer.Channel(channel_number)
self.parent_track = parent_track
# configure this pygame channel to post a pygame event when it's done
# playing a sound
self.pygame_channel.set_endevent(
pygame.locals.USEREVENT + channel_number)
# add a pygame event handler so this channel object gets notified of
# the above
machine.register_pygame_handler(
pygame.locals.USEREVENT + channel_number, self.sound_is_done)
def __repr__(self):
return '<Channel {}, Parent:{}>'.format(self.channel_number,
self.parent_track)
def __cmp__(self, other):
# Used so we can sort the channel list by the priority of the current
# playing sound
return cmp(self.current_sound_priority, other.current_sound_priority)
def sound_is_done(self):
"""Indicates that the sound that was playing on this channel is now
done.
This is the callback method that's automatically called by Pygame. It
will check the queue and automatically play any queued sounds."""
self.current_sound_priority = -1
if not self.parent_track.queue.empty():
sound, priority, settings = self.parent_track.get_sound()
self.play(sound, priority=priority, **settings)
def play(self, sound, **settings):
"""Plays a sound on this channel.
Args:
sound: The sound object to play.
**settings: Additional settings for this sound's playback.
"""
self.current_sound = sound
self.current_sound_priority = settings['priority']
loops = settings.get('loops', 0)  # default 0 (play once); `loops` was previously unbound if unset
# calculate the volume for this sound
# start with the sound volume, multiply the overall and track volumes
volume = (1.0 *
self.parent_track.volume *
sound.config['volume'] *
self.machine_sound.volume)
if 'volume' in settings:
volume *= settings['volume']
# set the sound's current volume
sound.sound_object.set_volume(volume)
self.log.debug("Playing Sound: %s Vol: %s", sound,
sound.sound_object.get_volume())
self.pygame_channel.play(sound.sound_object, loops)
class Sound(Asset):
def _initialize_asset(self):
if self.config['track'] in self.machine.sound.tracks:
self.track = self.machine.sound.tracks[self.config['track']]
elif self.config['track'] == self.machine.sound.stream_track.name:
self.track = self.machine.sound.stream_track
else:
self.asset_manager.log.critical("Music track not found: %s",
self.config['track'])
raise Exception()
self.sound_object = None
self.priority = 0
self.expiration_time = None
if 'volume' not in self.config:
self.config['volume'] = 1
if 'max_queue_time' not in self.config: # todo
self.config['max_queue_time'] = None
if 'max_simultaneous_playing' not in self.config: # todo
self.config['max_simultaneous_playing'] = None
if 'fade_in' not in self.config: # todo
self.config['fade_in'] = 0
if 'fade_out' not in self.config: # todo
self.config['fade_out'] = 0
if 'loops' not in self.config: # todo
self.config['loops'] = None
if 'start_time' not in self.config: # todo
self.config['start_time'] = None
if 'end_time' not in self.config: # todo
self.config['end_time'] = None
def __repr__(self):
return '<Sound: {}>'.format(self.file_name)
def do_load(self, callback):
try:
self.sound_object = pygame.mixer.Sound(self.file_name)
except pygame.error:
self.asset_manager.log.error("Pygame Error for file %s. '%s'",
self.file_name, pygame.get_error())
self.loaded = True
if callback:
callback()
def _unload(self):
self.sound_object = None
def play(self, loops=0, priority=0, fade_in=0, volume=1, **kwargs):
"""Plays this sound.
Args:
loops: Integer of how many times you'd like this sound to repeat.
A value of -1 means it will loop forever.
priority: The relative priority of this sound which controls what
happens if the track this sound is playing on is playing the
max simultaneous sounds.
fade_in: MPF time string for how long this sound should fade in
when it starts.
volume: Volume for this sound as a float between 0.0 and 1.0. Zero
is mute, 1 is full volume, anything in between is in between.
**kwargs: Catch all since this method might be used as an event
callback which could include random kwargs.
"""
self.asset_manager.log.debug("Playing sound %s. Loops: %s, Priority: %s, "
"Fade in: %s, Vol: %s, kwargs: %s", self,
loops, priority, fade_in, volume, kwargs)
if not self.sound_object:
self.load()
if 'sound' in kwargs:
kwargs.pop('sound')
self.track.play(self, priority=priority, loops=loops, volume=volume,
fade_in=fade_in, **kwargs)
def stop(self, fade_out=0, reset=True, **kwargs):
"""Stops this sound playing.
Args:
fade_out: MPF time string for how long this sound will fade out as
it stops.
reset: Boolean for whether this sound should reset its playback
position to the beginning. Default is True.
**kwargs: Catch all since this method might be used as an event
callback which could include random kwargs.
"""
#self.sound_object.stop()
self.track.stop(self)
asset_class = Sound
asset_attribute = 'sounds' # self.machine.<asset_attribute>
#display_element_class = ImageDisplayElement
create_asset_manager = True
path_string = 'sounds'
config_section = 'sounds'
file_extensions = ('ogg', 'wav')
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
|
spierepf/mpf
|
mpf/media_controller/core/sound.py
|
Python
|
mit
| 33,528
|
[
"Brian"
] |
ca0fb779e49f8cfb0178f907c148411fd7d70af85412d8109adfba3b0279765b
|
"""
Updated by Lin Xiong Oct-30, 2017
Modified by Lin Xiong Oct-31, 2017 (add SE building block)
"""
import argparse,logging,os
import mxnet as mx
from symbol_se_inception_resnet_v2 import get_symbol
logger = logging.getLogger()
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(message)s')
console = logging.StreamHandler()
console.setFormatter(formatter)
logger.addHandler(console)
def multi_factor_scheduler(begin_epoch, epoch_size, step=[30, 60, 90, 95, 110, 120], factor=0.1):
#def multi_factor_scheduler(begin_epoch, epoch_size, step=[15, 30, 45, 60, 75, 90, 115], factor=0.1):
step_ = [epoch_size * (x-begin_epoch) for x in step if x-begin_epoch > 0]
return mx.lr_scheduler.MultiFactorScheduler(step=step_, factor=factor) if len(step_) else None
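# Example (hedged): with epoch_size=100, begin_epoch=0 and step=[30, 60],
# step_ == [3000, 6000], the iteration counts at which the learning rate is
# multiplied by `factor`. Resuming with begin_epoch=40 keeps only the
# 60-epoch boundary, rescaled to 100 * (60 - 40) == 2000 iterations.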
def main():
ratio_list = [0.25, 0.125, 0.0625, 0.03125] # 1/4, 1/8, 1/16, 1/32
if args.data_type == "cifar10":
args.aug_level = 1
args.num_classes = 10
symbol = get_symbol(ratio_list[2], args.num_classes)
elif args.data_type == "imagenet":
args.num_classes = 1000
symbol = get_symbol(ratio_list[2], args.num_classes)
else:
raise ValueError("do not support {} yet".format(args.data_type))
kv = mx.kvstore.create(args.kv_store)
devs = mx.cpu() if args.gpus is None else [mx.gpu(int(i)) for i in args.gpus.split(',')]
epoch_size = max(int(args.num_examples / args.batch_size / kv.num_workers), 1)
begin_epoch = args.model_load_epoch if args.model_load_epoch else 0
if not os.path.exists("./model"):
os.mkdir("./model")
model_prefix = "model/se-inception-resnet-v2-{}-{}-{}".format(args.data_type, kv.rank, 0)
checkpoint = mx.callback.do_checkpoint(model_prefix)
arg_params = None
aux_params = None
if args.retrain:
_, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, args.model_load_epoch)
if args.memonger:
import memonger
symbol = memonger.search_plan(symbol, data=(args.batch_size, 3, 32, 32) if args.data_type=="cifar10"
else (args.batch_size, 3, 224, 224))
train = mx.io.ImageRecordIter(
path_imgrec = os.path.join(args.data_dir, "train.rec") if args.data_type == 'cifar10' else
os.path.join(args.data_dir, "train_256_q90.rec") if args.aug_level == 1
else os.path.join(args.data_dir, "train_480_q90.rec") ,
label_width = 1,
data_name = 'data',
label_name = 'softmax_label',
data_shape = (3, 32, 32) if args.data_type=="cifar10" else (3, 224, 224),
batch_size = args.batch_size,
pad = 4 if args.data_type == "cifar10" else 0,
fill_value = 127, # only used when pad is valid
rand_crop = True,
max_random_scale = 1.0, # 480 with imagnet, 32 with cifar10
min_random_scale = 1.0 if args.data_type == "cifar10" else 1.0 if args.aug_level == 1 else 0.533, # 256.0/480.0=0.533, 256.0/384.0=0.667 256.0/256=1.0
max_aspect_ratio = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 0.25, # 0.25
random_h = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 36, # 0.4*90
random_s = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 50, # 0.4*127
random_l = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 50, # 0.4*127
max_random_contrast = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 36, # 0.4*90,
max_random_illumination = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 50, # 0.4*127,
max_rotate_angle = 0 if args.aug_level <= 2 else 10,
max_shear_ratio = 0 if args.aug_level <= 2 else 0.1, #0.1 args.aug_level = 3
rand_mirror = True,
shuffle = True,
num_parts = kv.num_workers,
part_index = kv.rank)
val = mx.io.ImageRecordIter(
path_imgrec = os.path.join(args.data_dir, "val.rec") if args.data_type == 'cifar10' else
os.path.join(args.data_dir, "val_256_q90.rec"),
label_width = 1,
data_name = 'data',
label_name = 'softmax_label',
batch_size = args.batch_size,
data_shape = (3, 32, 32) if args.data_type=="cifar10" else (3, 224, 224),
rand_crop = False,
rand_mirror = False,
num_parts = kv.num_workers,
part_index = kv.rank)
model = mx.model.FeedForward(
ctx = devs,
symbol = symbol,
arg_params = arg_params,
aux_params = aux_params,
num_epoch = 200 if args.data_type == "cifar10" else 125,
begin_epoch = begin_epoch,
learning_rate = args.lr,
momentum = args.mom,
wd = args.wd,
optimizer = 'nag',
# optimizer = 'sgd',
initializer = mx.init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2),
lr_scheduler = multi_factor_scheduler(begin_epoch, epoch_size, step=[220, 260, 280], factor=0.1)
if args.data_type=='cifar10' else
multi_factor_scheduler(begin_epoch, epoch_size, step=[30, 60, 90, 95, 110, 120], factor=0.1),
)
model.fit(
X = train,
eval_data = val,
eval_metric = ['acc'] if args.data_type=='cifar10' else
['acc', mx.metric.create('top_k_accuracy', top_k = 5), mx.metric.create('rmse'), mx.metric.create('ce')],
kvstore = kv,
batch_end_callback = mx.callback.Speedometer(args.batch_size, args.frequent),
epoch_end_callback = checkpoint)
# logging.info("top-1 and top-5 acc is {}".format(model.score(X = val,
# eval_metric = ['acc', mx.metric.create('top_k_accuracy', top_k = 5)])))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="command for training resnet-v2")
parser.add_argument('--gpus', type=str, default='0', help='the gpus will be used, e.g "0,1,2,3"')
parser.add_argument('--data-dir', type=str, default='./data/imagenet/', help='the input data directory')
parser.add_argument('--data-type', type=str, default='imagenet', help='the dataset type')
parser.add_argument('--list-dir', type=str, default='./',
help='the directory which contain the training list file')
parser.add_argument('--lr', type=float, default=0.1, help='initial learning rate')
parser.add_argument('--mom', type=float, default=0.9, help='momentum for sgd')
parser.add_argument('--bn-mom', type=float, default=0.9, help='momentum for batch normalization')
parser.add_argument('--wd', type=float, default=0.0001, help='weight decay for sgd')
parser.add_argument('--batch-size', type=int, default=256, help='the batch size')
parser.add_argument('--workspace', type=int, default=512, help='memory space size (MB) used in convolution; if xpu '
'memory is exhausted (OOM), try a smaller value, such as --workspace 256')
parser.add_argument('--num-classes', type=int, default=1000, help='the class number of your task')
parser.add_argument('--aug-level', type=int, default=2, choices=[1, 2, 3],
help='level 1: use only random crop and random mirror\n'
'level 2: add scale/aspect/hsv augmentation based on level 1\n'
'level 3: add rotation/shear augmentation based on level 2')
parser.add_argument('--num-examples', type=int, default=1281167, help='the number of training examples')
parser.add_argument('--kv-store', type=str, default='device', help='the kvstore type')
parser.add_argument('--model-load-epoch', type=int, default=0,
help='load the model on an epoch using the model-load-prefix')
parser.add_argument('--frequent', type=int, default=50, help='frequency of logging')
parser.add_argument('--memonger', action='store_true', default=False,
help='true means using memonger to save momory, https://github.com/dmlc/mxnet-memonger')
parser.add_argument('--retrain', action='store_true', default=False, help='true means continue training')
args = parser.parse_args()
hdlr = logging.FileHandler('./log/log-se-inception-resnet-v2-{}-{}.log'.format(args.data_type, 0))
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logging.info(args)
main()
|
bruinxiong/SENet.mxnet
|
train_se_inception_resnet_v2.py
|
Python
|
apache-2.0
| 9,164
|
[
"Gaussian"
] |
1d65f6b5d0531909c5f22954fe1b8b7a112c6542a5ce544f136f887ee81bfb14
|
from django.shortcuts import render
from rango.models import Category
from rango.models import Page
from rango.models import User
from rango.models import UserProfile  # hedged fix: UserProfile is used in profile()/edit_profile() below but was never imported
from rango.forms import CategoryForm
from rango.forms import PageForm
from rango.forms import UserForm, UserProfileForm
from django.contrib.auth import authenticate, login
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout
from datetime import datetime
from rango.bing_search import run_query
from django.shortcuts import redirect
@login_required
def restricted(request):
# Take the user back to the homepage.
return HttpResponseRedirect('/rango/restricted.html')
def manageCookies(request, context_dict):
visits = request.session.get('visits')
if not visits:
visits = 1
reset_last_visit_time = False
last_visit = request.session.get('last_visit')
if last_visit:
last_visit_time = datetime.strptime(last_visit[:-7], "%Y-%m-%d %H:%M:%S")
if (datetime.now() - last_visit_time).seconds > 0:
# ...reassign the value of the cookie to +1 of what it was before...
visits = visits + 1
# ...and update the last visit cookie, too.
reset_last_visit_time = True
else:
# Cookie last_visit doesn't exist, so create it to the current date/time.
reset_last_visit_time = True
if reset_last_visit_time:
request.session['last_visit'] = str(datetime.now())
request.session['visits'] = visits
context_dict['visits'] = visits
return context_dict
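# Behaviour sketch (hedged): the first request in a session stores visits=1
# and last_visit=now; any request at least one second later increments
# visits and refreshes last_visit, while requests within the same second
# leave the stored session values untouched.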
def index(request):
category_list = Category.objects.order_by('-likes')[:5]
page_list = Page.objects.order_by('-views')[:5]
context_dict = {'categories': category_list, 'pages': page_list}
context_dict = manageCookies(request, context_dict)
response = render(request,'rango/index.html', context_dict)
return response
def about(request):
# Construct a dictionary to pass to the template engine as its context.
# Note the key boldmessage is the same as {{ boldmessage }} in the template!
context_dict = {'boldmessage': "sorry :)"}
# If the visits session varible exists, take it and use it.
# If it doesn't, we haven't visited the site so set the count to zero.
if request.session.get('visits'):
context_dict['visits'] = request.session.get('visits')
else:
context_dict['visits'] = 0
# Return a rendered response to send to the client.
# We make use of the shortcut function to make our lives easier.
# Note that the first parameter is the template we wish to use.
return render(request, 'rango/about/index.html', context_dict)
def category(request, category_name_slug):
context_dict = {}
context_dict['result_list'] = None
context_dict['query'] = None
if request.method == 'POST':
query = request.POST.get('query', '').strip()
if query:
# Run our Bing function to get the results list!
result_list = run_query(query)
context_dict['result_list'] = result_list
context_dict['query'] = query
try:
category = Category.objects.get(slug=category_name_slug)
context_dict['category_name'] = category.name
pages = Page.objects.filter(category=category).order_by('-views')
context_dict['pages'] = pages
context_dict['category'] = category
except Category.DoesNotExist:
pass
# Guard against a NameError: `category` is undefined when the slug lookup above failed
if not context_dict['query'] and 'category' in context_dict:
context_dict['query'] = context_dict['category'].name
return render(request, 'rango/category.html', context_dict)
@login_required
def add_category(request):
# A HTTP POST?
if request.method == 'POST':
form = CategoryForm(request.POST)
# Have we been provided with a valid form?
if form.is_valid():
# Save the new category to the database.
form.save(commit=True)
# Now call the index() view.
# The user will be shown the homepage.
return index(request)
else:
# The supplied form contained errors - just print them to the terminal.
print form.errors
else:
# If the request was not a POST, display the form to enter details.
form = CategoryForm()
# Bad form (or form details), no form supplied...
# Render the form with error messages (if any).
return render(request, 'rango/add_category.html', {'form': form})
@login_required
def add_page(request, category_name_slug):
try:
cat = Category.objects.get(slug=category_name_slug)
except Category.DoesNotExist:
cat = None
if request.method == 'POST':
form = PageForm(request.POST)
if form.is_valid():
if cat:
page = form.save(commit=False)
page.category = cat
page.views = 0
page.save()
# probably better to use a redirect here.
return category(request, category_name_slug)
else:
print form.errors
else:
form = PageForm()
context_dict = {'form':form, 'category': cat}
return render(request, 'rango/add_page.html', context_dict)
'''
def register(request):
# A boolean value for telling the template whether the registration was successful.
# Set to False initially. Code changes value to True when registration succeeds.
registered = False
# If it's a HTTP POST, we're interested in processing form data.
if request.method == 'POST':
# Attempt to grab information from the raw form information.
# Note that we make use of both UserForm and UserProfileForm.
user_form = UserForm(data=request.POST)
profile_form = UserProfileForm(data=request.POST)
# If the two forms are valid...
if user_form.is_valid() and profile_form.is_valid():
# Save the user's form data to the database.
user = user_form.save()
# Now we hash the password with the set_password method.
# Once hashed, we can update the user object.
user.set_password(user.password)
user.save()
# Now sort out the UserProfile instance.
# Since we need to set the user attribute ourselves, we set commit=False.
# This delays saving the model until we're ready to avoid integrity problems.
profile = profile_form.save(commit=False)
profile.user = user
# Did the user provide a profile picture?
# If so, we need to get it from the input form and put it in the UserProfile model.
if 'picture' in request.FILES:
profile.picture = request.FILES['picture']
# Now we save the UserProfile model instance.
profile.save()
# Update our variable to tell the template registration was successful.
registered = True
# Invalid form or forms - mistakes or something else?
# Print problems to the terminal.
# They'll also be shown to the user.
else:
print user_form.errors, profile_form.errors
# Not a HTTP POST, so we render our form using two ModelForm instances.
# These forms will be blank, ready for user input.
else:
user_form = UserForm()
profile_form = UserProfileForm()
# Render the template depending on the context.
return render(request,
'rango/register.html',
{'user_form': user_form, 'profile_form': profile_form, 'registered': registered} )
def user_login(request):
# If the request is a HTTP POST, try to pull out the relevant information.
if request.method == 'POST':
# Gather the username and password provided by the user.
# This information is obtained from the login form.
username = request.POST['username']
password = request.POST['password']
# Use Django's machinery to attempt to see if the username/password
# combination is valid - a User object is returned if it is.
user = authenticate(username=username, password=password)
# If we have a User object, the details are correct.
# If None (Python's way of representing the absence of a value), no user
# with matching credentials was found.
if user:
# Is the account active? It could have been disabled.
if user.is_active:
# If the account is valid and active, we can log the user in.
# We'll send the user back to the homepage.
login(request, user)
return HttpResponseRedirect('/rango/')
else:
# An inactive account was used - no logging in!
return HttpResponse("Your Rango account is disabled.")
else:
# Bad login details were provided. So we can't log the user in.
print "Invalid login details: {0}, {1}".format(username, password)
return HttpResponse("Hey <strong>{0}</strong>, your login details are invalid.<ul><li><a href='../../rango/login/'>Please try again</a>.</li></ul>".format(username))
# The request is not a HTTP POST, so display the login form.
# This scenario would most likely be a HTTP GET.
else:
# No context variables to pass to the template system, hence the
# blank dictionary object...
return render(request, 'rango/login.html', {})
# Use the login_required() decorator to ensure only those logged in can access the view.
@login_required
def user_logout(request):
# Since we know the user is logged in, we can now just log them out.
logout(request)
# Take the user back to the homepage.
return HttpResponseRedirect('/rango/')
'''
def restricted(request):
# Construct a dictionary to pass to the template engine as its context.
# Note the key boldmessage is the same as {{ boldmessage }} in the template!
context_dict = {'boldmessage': "Oops :)"}
# Return a rendered response to send to the client.
# We make use of the shortcut function to make our lives easier.
# Note that the first parameter is the template we wish to use.
return render(request, 'rango/restricted.html', context_dict)
def search(request):
result_list = []
if request.method == 'POST':
query = request.POST['query'].strip()
if query:
# Run our Bing function to get the results list!
result_list = run_query(query)
return render(request, 'rango/search.html', {'result_list': result_list})
def track_url(request):
page_id = None
url = '/rango/'
if request.method == 'GET':
if 'page_id' in request.GET:
page_id = request.GET['page_id']
try:
page = Page.objects.get(id=page_id)
page.views = page.views + 1
page.save()
url = page.url
except:
pass
return redirect(url)
@login_required
def register_profile(request):
if request.method == 'POST':
profile_form = UserProfileForm(data=request.POST)
if profile_form.is_valid():
profile = profile_form.save(commit=False)
profile.user = User.objects.get(id=request.user.id)
if 'picture' in request.FILES:
try:
profile.picture = request.FILES['picture']
except:
pass
profile.save()
return redirect('index')
else:
profile_form = UserProfileForm()
return render(request, 'registration/profile_registration.html', {'profile_form': profile_form})
@login_required
def profile(request, user_id = None):
if user_id is not None:
context_dict = {'user': User.objects.get(id=user_id)}
else:
context_dict = {'user': User.objects.get(id=request.user.id)}
try:
context_dict['profile'] = UserProfile.objects.get(user=context_dict['user'])
except:
context_dict['profile'] = None
context_dict['myprofile'] = user_id is None or user_id == request.user.id
return render(request, 'registration/profile.html', context_dict)
@login_required
def edit_profile(request):
try:
users_profile = UserProfile.objects.get(user=request.user)
except:
users_profile = None
if request.method == 'POST':
profile_form = UserProfileForm(data=request.POST, instance=users_profile)
if profile_form.is_valid():
profile_updated = profile_form.save(commit=False)
if users_profile is None:
profile_updated.user = User.objects.get(id=request.user.id)
if 'picture' in request.FILES:
try:
profile_updated.picture = request.FILES['picture']
except:
pass
profile_updated.save()
return redirect('profile')
else:
form = UserProfileForm(instance=users_profile)
return render(request, 'registration/profile_edit.html', {'profile_form': form})
@login_required
def user_list(request):
users = User.objects.all()
return render(request, 'registration/user_list.html', {'users': users})
def bad_url(request):
return render(request, 'rango/nopage.html')
|
2027205T/tango_with_django
|
tango_with_django_project/rango/views.py
|
Python
|
mit
| 13,387
|
[
"VisIt"
] |
83483ff6d67a9bbff078602d046d1b6033f8a89f573d5e4b7d5a58b62c86ebd8
|
''' COSMO-VIEW,
Quim Ballabrera, May 2017
Script for visualizing model outputs provided by various operational
systems
EGL, 06/2020: Changes:
  No more support for Python 2.7.
  Support for Basemap deprecated; updated to the Cartopy system.
  Limited support for geographical projections. Everything is
  plotted in PlateCarree and data are assumed to be provided
  in geodetic (lon,lat) values.
  A MESSAGE variable has been introduced to accumulate "print" messages.
'''
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
from tkinter import filedialog
import dateutil.parser as dparser
import numpy as np
import datetime
import json
import os
import io
import urllib.request
from netCDF4 import Dataset,num2date
try:
  to_unicode = unicode
except NameError:
  # Python 3: the built-in "unicode" no longer exists.
  to_unicode = str
#import matplotlib
#matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import cosmo.lineplot as lineplot
from cosmo.tools import toconsola
from cosmo.tools import exists
from cosmo.tools import caldat
from cosmo.tools import Select_Columns
from cosmo import COSMO_CONF_PATH
from cosmo import COSMO_CONF_DATA
#EG To manage cartopy projections
#EG from cosmo.tools import map_proj
__version__ = "1.0"
__author__ = "Quim Ballabrera"
__date__ = "July 2018"
# =================
class parameters():
# =================
''' Class for Lagrangian floats'''
__version__ = "1.0"
__author__ = "Quim Ballabrera"
__date__ = "July 2018"
  def __init__(self, wid=False):
  # ==================
    ''' Define and initialize class attributes'''
self.MESSAGE = ""
with open(COSMO_CONF_DATA) as infile:
conf = json.load(infile)
COSMO_CONF_NAME = conf['COSMO_CONF_NAME']
COSMO_CONF = COSMO_CONF_PATH + COSMO_CONF_NAME + os.sep
self.FILECONF = COSMO_CONF + 'trajectory.conf'
self.FILENAME = tk.StringVar()
self.SOURCE = 'FILE'
self.ALIAS = tk.StringVar()
self.show = tk.BooleanVar()
self.I = tk.IntVar()
self.L = tk.IntVar()
self.L1 = tk.IntVar()
self.L2 = tk.IntVar()
self.SEPARATED_COLOR = tk.BooleanVar()
self.FLOAT_COLOR = []
self.FLOAT_SHOW = []
self.FLOAT_ZORDER = []
self.CROP = tk.BooleanVar()
self.PLOT = lineplot.parameters()
self.MESSAGE += self.PLOT.MESSAGE
self.nfloats = 0
self.nrecords = 0
self.lon = []
self.lat = []
self.DATE = []
self.TIME = []
self.speed = []
self.SOURCE = 'FILE'
self.I.set(0)
self.L.set(0)
self.PLOT.LINE_COLOR.set('blue')
self.SEPARATED_COLOR.set(False)
self.show.set(True)
self.ALIAS.set('')
self.CROP.set(False)
self.Fx = None
self.Fy = None
self.LINK = tk.BooleanVar()
self.LINK.set(False)
self.cons = wid
#if exists(self.FILECONF):
# print('Reading Lagrangian configuration file '+self.FILECONF)
# try:
# conf = self.conf_load(self.FILECONF)
# self.conf_set(conf)
# except:
# print('Error reading, using default parameters')
# conf = self.conf_get()
# self.conf_save(conf,self.FILECONF)
#else:
# print('Saving Lagrangian configuration file ',self.FILECONF)
# conf = self.conf_get()
# self.conf_save(conf,self.FILECONF)
def conf_get(self):
# =================
''' Set class dictionary from class attributes '''
conf = {}
conf['NFLOATS'] = self.nfloats
conf['SOURCE'] = self.SOURCE
conf['ALIAS'] = self.ALIAS.get()
conf['SHOW'] = self.show.get()
conf['I'] = self.I.get()
conf['L'] = self.L.get()
conf['L1'] = self.L1.get()
conf['L2'] = self.L2.get()
conf['SEPARATED_COLOR'] = self.SEPARATED_COLOR.get()
FLOAT_COLOR = []
FLOAT_SHOW = []
FLOAT_ZORDER = []
try:
for i in range(self.nfloats):
FLOAT_COLOR.append(self.FLOAT_COLOR[i].get())
FLOAT_SHOW.append(self.FLOAT_SHOW[i].get())
FLOAT_ZORDER.append(self.FLOAT_ZORDER[i].get())
except:
for i in range(self.nfloats):
FLOAT_COLOR.append(self.PLOT.LINE_COLOR.get())
FLOAT_SHOW.append(self.show.get())
FLOAT_ZORDER.append(self.PLOT.ZORDER.get())
conf['FLOAT_COLOR'] = FLOAT_COLOR.copy()
conf['FLOAT_SHOW'] = FLOAT_SHOW.copy()
conf['FLOAT_ZORDER'] = FLOAT_ZORDER.copy()
conf['CROP'] = self.CROP.get()
conf['LINK'] = self.LINK.get()
conf['PLOT'] = self.PLOT.conf_get()
return conf
def conf_set(self,conf):
# ======================
    ''' Set class attributes from the configuration dictionary '''
self.nfloats = conf['NFLOATS']
self.SOURCE = conf['SOURCE']
self.ALIAS.set(conf['ALIAS'])
self.show.set(conf['SHOW'])
self.I.set(conf['I'])
self.L.set(conf['L'])
self.L1.set(conf['L1'])
self.L2.set(conf['L2'])
self.SEPARATED_COLOR.set(conf['SEPARATED_COLOR'])
self.FLOAT_COLOR = []
self.FLOAT_SHOW = []
self.FLOAT_ZORDER = []
try:
for i in range(self.nfloats):
self.FLOAT_COLOR.append(tk.StringVar(value=conf['FLOAT_COLOR'][i]))
self.FLOAT_SHOW.append(tk.BooleanVar(value=conf['FLOAT_SHOW'][i]))
self.FLOAT_ZORDER.append(tk.IntVar(value=conf['FLOAT_ZORDER'][i]))
except:
for i in range(self.nfloats):
self.FLOAT_COLOR.append(tk.StringVar(value=self.PLOT.LINE_COLOR.get()))
self.FLOAT_SHOW.append(tk.BooleanVar(value=self.show.get()))
self.FLOAT_ZORDER.append(tk.IntVar(value=self.PLOT.ZORDER.get()))
self.CROP.set(conf['CROP'])
self.LINK.set(conf['LINK'])
self.PLOT.conf_set(conf['PLOT'])
def conf_load(self,filename):
# ===========================
    '''Open and read the configuration file'''
# Read configuration
with open(filename) as infile:
conf = json.load(infile)
return conf
def conf_save(self,conf,filename):
# ===============================
'''Save the configuration file'''
with io.open(filename,'w',encoding='utf8') as outfile:
str_ = json.dumps(conf,ensure_ascii=False, \
sort_keys=True, \
indent=2, \
separators=(',',': '))
      outfile.write(to_unicode(str_)+'\n')
      # No explicit close needed: the "with" block closes the file.
def Read(self,filename):
# ======================
'''Opens and reads a trajectory file'''
__version__ = "0.3"
__author__ = "Quim Ballabrera"
__date__ = "February 2018"
self.FILENAME.set(filename)
# --------------------------------------
def read_trajectory_ncdf(filename):
# --------------------------------------
'''Read a set of trajectories from a netcdf file'''
#EG
self.MESSAGE +='\n Reading netcdf file - '+filename
if self.cons: toconsola(self.MESSAGE, wid=self.cons)
else: print(self.MESSAGE)
ncid = Dataset(filename)
self.nfloats = ncid.dimensions['floats'].size
self.nrecords = ncid.dimensions['time'].size
# Check time dimensions:
      # In BLM files, the time variable has a single dimension.
      # In MLM files, the time variable has two dimensions.
#
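      # Illustrative sketch (dimension names taken from the checks below):
      #   BLM:  ncid.variables['time'].shape == (nrecords,)
      #   MLM:  ncid.variables['time'].shape == (nrecords, nfloats)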
if len(ncid.variables['time'].shape) == 1:
self.SOURCE = 'blm'
elif len(ncid.variables['time'].shape) == 2:
self.SOURCE = 'mlm'
else:
#EG
self.MESSAGE +='\nError: Unknown Trajectory source'
if self.cons: toconsola(self.MESSAGE, wid=self.cons)
else: print(self.MESSAGE)
return
try:
self.lon = ncid.variables['lon'][:,:].squeeze()
except:
self.lon = ncid.variables['longitude'][:,:]
try:
self.lat = ncid.variables['lat'][:,:].squeeze()
except:
self.lat = ncid.variables['latitude'][:,:]
# If some longitudes have missing values, we set them
# equal to nan:
#
try:
self.lon = self.lon.filled(fill_value=np.nan)
except:
pass
try:
self.lat = self.lat.filled(fill_value=np.nan)
except:
pass
# Get the date of the floats:
#
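      # Illustrative sketch (units assumed, not from a real file):
      #   num2date(17532.5, units='days since 1970-01-01',
      #            calendar='gregorian') yields a datetime near
      #   2018-01-01 12:00, which is then reformatted below.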
if self.SOURCE == 'blm':
# ------------------------
Time_blm = ncid.variables['time'][:]
Time_units = ncid.variables['time'].units
Time_calendar = ncid.variables['time'].calendar
self.DATE = []
for i in range(self.nrecords):
tmp = num2date(Time_blm[i], \
units=Time_units, \
calendar=Time_calendar)
s = tmp.strftime('%Y-%m-%d %H:%M:%S')
d = datetime.datetime.strptime(s,'%Y-%m-%d %H:%M:%S')
self.DATE.append(d)
#Time_jd = ncid.variables['time'][:]
#self.DATE = []
#for i in range(self.nrecords):
# a = caldat(Time_jd[i])
# self.DATE.append(datetime.datetime(a[0],a[1],a[2],a[3],a[4],a[5]))
elif self.SOURCE == 'mlm':
# ------------------------
Time_jd = ncid.variables['time'][:,:]
Time_jd[Time_jd>1e10] = np.nan
self.DATE = []
for j in range(ncid.variables['time'].shape[0]):
tmpdate = []
for i in range(self.nfloats):
a = caldat(Time_jd[j,i])
if np.isnan(a[0]):
tmpdate.append(datetime.datetime(6050,1,1,12,0,0))
else:
tmpdate.append(datetime.datetime(a[0],a[1],a[2],a[3],a[4],a[5]))
self.DATE.append(tmpdate)
# --------------------------------------
def read_trajectory_json(filename):
# --------------------------------------
'''Read a trajectory from a json file'''
import json
      if filename.lower().startswith(('http:','https:')):
self.MESSAGE +='\nReading remote json file.. '+filename.split("/")[-1]
        self.MESSAGE +='\nPath: '+'/'.join(filename.split("/")[:-1])+'\n'
if self.cons: toconsola(self.MESSAGE, wid=self.cons)
response = urllib.request.urlopen(filename)
data = response.read()
text = data.decode('utf-8')
DATA = json.loads(text)
else:
self.MESSAGE +='\nReading local json file.. '+filename.split("/")[-1]
        self.MESSAGE +='\nPath: '+'/'.join(filename.split("/")[:-1])+'\n'
if self.cons: toconsola(self.MESSAGE, wid=self.cons)
else: print(self.MESSAGE)
with open(filename) as datafile:
DATA = json.load(datafile)
nfeatures = len(DATA["features"])
self.MESSAGE +="\nNumber of features: "+str(nfeatures)
if self.cons: toconsola(self.MESSAGE, wid=self.cons)
else: print(self.MESSAGE)
# Detect the GEOJSON MODE
# In the "Dated LineString", the date is stored in the property "time"
# of the trajectory
# In the "Undated LineString", the date is stored in the points
#
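      # Illustrative sketch (a minimal assumed layout, not from a real file):
      #   Dated LineString:
      #     {"features": [{"geometry": {"type": "LineString",
      #                                 "coordinates": [[lon1, lat1], ...]},
      #                    "properties": {"time": {"data":
      #                        ["2018-07-01T00:00:00Z", ...]}}}]}
      #   Undated LineString: the LineString carries no dates; each "Point"
      #   feature instead holds "properties": {"time": "2018-07-01T00:00:00Z"}.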
pline = [DATA["features"][i]["geometry"]["type"] \
for i in range(nfeatures)].index("LineString")
try:
nl = DATA["features"][pline]["properties"]["time"]["data"]
fileFormat = "Dated LineString"
except:
fileFormat = "Undated LineString"
self.lon = []
self.lat = []
self.DATE = []
if fileFormat == "Undated LineString":
for i in range(nfeatures):
if DATA["features"][i]["geometry"]["type"] == "Point":
a = DATA["features"][i]["geometry"]["coordinates"]
b = DATA["features"][i]["properties"]["time"]
self.lon.append(a[0])
self.lat.append(a[1])
self.DATE.append(datetime.datetime.strptime(b, \
'%Y-%m-%dT%H:%M:%SZ'))
elif fileFormat == "Dated LineString":
POINTS = DATA["features"][pline]["geometry"]["coordinates"]
DATES = DATA["features"][pline]["properties"]["time"]["data"]
for i in range(len(DATES)):
self.lon.append(POINTS[i][0])
self.lat.append(POINTS[i][1])
#self.DATE.append(datetime.datetime.strptime(DATES[i], \
# '%Y-%m-%dT%H:%M:%SZ'))
self.DATE.append(datetime.datetime.strptime(DATES[i][:19], \
'%Y-%m-%dT%H:%M:%S'))
else:
        self.MESSAGE += '\nError: Unknown GEOJSON file format'
if self.cons: toconsola(self.MESSAGE, wid=self.cons)
else: print(self.MESSAGE)
self.nfloats = 1
self.nrecords = len(self.lon)
self.lon = np.array(self.lon)
self.lat = np.array(self.lat)
# --------------------------------------
def read_trajectory_txt(filename):
# --------------------------------------
'''Read a trajectory from a txt file'''
with open(filename,'r') as f:
first_line = f.readline()
self.MESSAGE +='\nReading txt file '+filename
if self.cons: toconsola(self.MESSAGE, wid=self.cons)
else: print(self.MESSAGE)
win = tk.Toplevel()
win.title('Float column')
Axes = Select_Columns(win,first_line,' ')
win.wait_window()
self.nfloats = None
self.nrecords = None
self.lon = []
self.lat = []
self.DATE = []
if Axes.lon is None:
return
if Axes.lat is None:
return
with open(filename) as datafile:
for line in datafile.readlines():
line = line.strip()
line = ' '.join(line.split())
columns = line.split(Axes.SEPARATOR.get())
self.lon.append(float(columns[Axes.lon]))
self.lat.append(float(columns[Axes.lat]))
if Axes.type == 0:
year = int(columns[Axes.year])
month = int(columns[Axes.month])
day = int(columns[Axes.day])
if Axes.hour is None:
hour = 0
else:
hour = int(columns[Axes.hour])
if Axes.minute is None:
minute = 0
else:
minute = int(columns[Axes.minute])
if Axes.second is None:
second = 0
else:
second = int(columns[Axes.second])
self.DATE.append(datetime.datetime(year,month,day, \
hour,minute,second))
elif Axes.type == 1:
self.DATE.append(datetime.datetime.strptime(columns[Axes.date],Axes.fmt))
elif Axes.type == 2:
self.DATE.append(datetime.datetime.strptime(columns[Axes.date]+ \
'T'+columns[Axes.time],Axes.fmt))
elif Axes.type == 3:
jd = float(columns[Axes.jday])
a = caldat(jd)
self.DATE.append(datetime.datetime(a[0],a[1],a[2],a[3],a[4],a[5]))
else:
          self.MESSAGE += '\nError: unknown ASCII file format'
if self.cons: toconsola(self.MESSAGE, wid=self.cons)
else: print(self.MESSAGE)
return
self.nfloats = 1
self.nrecords = len(self.lon)
self.lon = np.array(self.lon)
self.lat = np.array(self.lat)
filename = self.FILENAME.get()
if filename.lower().endswith(('.nc','.cdf','.ncdf')):
read_trajectory_ncdf(filename)
elif filename.lower().endswith(('.geojson','.json')):
read_trajectory_json(filename)
else:
read_trajectory_txt(filename)
# elif filename.lower().endswith(('.txt')):
# read_trajectory_txt(filename)
#
# elif filename.lower().endswith(('.csv')):
# print('csv: not yet coded')
# self.nfloats = None
# self.nrecords = None
#
# elif filename.lower().endswith(('.dat','.data')):
# print('ascii data: not yet coded')
# self.nfloats = None
# self.nrecords = None
    # Check that something has been read:
    if self.nfloats is None:
      self.MESSAGE += '\nError: unable to read trajectory data from '+filename.split("/")[-1]
      self.MESSAGE += '\nPath: '+'/'.join(filename.split("/")[:-1])
      if self.cons: toconsola(self.MESSAGE, wid=self.cons)
      else: print(self.MESSAGE)
      return
self.TIME = []
for i in range(self.nrecords):
self.TIME.append(self.DATE[i].timestamp())
self.TIME = np.array(self.TIME)
self.DATE = np.array(self.DATE)
# If we have data, we fill some fields to their default value.
self.I.set(0)
self.L.set(0)
self.L1.set(0)
self.L2.set(self.nrecords-1)
self.FLOAT_COLOR = []
self.FLOAT_SHOW = []
self.FLOAT_ZORDER = []
for i in range(self.nfloats):
self.FLOAT_COLOR.append(tk.StringVar(value=self.PLOT.LINE_COLOR.get()))
self.FLOAT_SHOW.append(tk.BooleanVar(value=True))
self.FLOAT_ZORDER.append(tk.IntVar(value=self.PLOT.ZORDER.get()))
# =======================
def drawing(ax,proj,FLT):
# ======================================
  ''' Draw the trajectories of the Lagrangian floats on the map'''
  __version__ = "1.0"
  __author__ = "Quim Ballabrera"
__date__ = "January 2018"
if FLT.nfloats == 0:
return
if not FLT.show.get():
return
#EG recover the cartopy projection
#EG projection = map_proj(proj)
r1 = FLT.L1.get()
r2 = FLT.L2.get() + 1
for i in range(FLT.nfloats): # Loop over buoys
if FLT.SEPARATED_COLOR.get():
color = FLT.FLOAT_COLOR[i].get()
visible = FLT.FLOAT_SHOW[i].get()
zorder = FLT.FLOAT_ZORDER[i].get()
else:
color = FLT.PLOT.LINE_COLOR.get()
visible = FLT.show.get()
zorder = FLT.PLOT.ZORDER.get()
if FLT.nfloats == 1:
xx, yy = FLT.lon[r1:r2],FLT.lat[r1:r2]
else:
xx, yy = FLT.lon[r1:r2,i],FLT.lat[r1:r2,i]
if FLT.PLOT.LINE_SHOW.get():
ax.plot(xx,yy,FLT.PLOT.LINE_STYLE.get(), \
linewidth=FLT.PLOT.LINE_WIDTH.get(), \
transform=proj, \
alpha=FLT.PLOT.ALPHA.get(), \
zorder=zorder, \
visible=visible, \
color=color)
if FLT.PLOT.MARKER_SHOW.get():
ax.plot(xx,yy,FLT.PLOT.MARKER_STYLE.get(), \
ms=FLT.PLOT.MARKER_SIZE.get(), \
transform=proj, \
alpha=FLT.PLOT.ALPHA.get(), \
zorder=zorder, \
visible=visible, \
color=FLT.PLOT.MARKER_COLOR.get())
if FLT.PLOT.INITIAL_SHOW.get():
      # Find the first valid (non-NaN) position. xx and yy are already
      # sliced to [r1:r2], so the scan uses relative indices and a loop
      # variable that does not shadow the float index i.
      vr1 = 0
      for k in range(len(xx)):
        if np.isnan(xx[k]) or np.isnan(yy[k]):
          continue
        vr1 = k
        break
ax.plot(xx[vr1],yy[vr1], \
FLT.PLOT.INITIAL_STYLE.get(), \
ms=FLT.PLOT.INITIAL_SIZE.get(), \
transform=proj, \
alpha=FLT.PLOT.ALPHA.get(), \
zorder=zorder, \
visible=visible, \
color=FLT.PLOT.INITIAL_COLOR.get())
if FLT.PLOT.ONMAP_SHOW.get():
L = FLT.L.get()
if FLT.nfloats == 1:
xx,yy = FLT.MAPX[L], FLT.MAPY[L]
else:
xx,yy = FLT.MAPX[L][i], FLT.MAPY[L][i]
ax.plot(xx,yy, \
FLT.PLOT.ONMAP_STYLE.get(), \
ms=FLT.PLOT.ONMAP_SIZE.get(), \
transform=proj, \
alpha=FLT.PLOT.ALPHA.get(), \
zorder=zorder, \
visible=visible, \
color=FLT.PLOT.ONMAP_COLOR.get())
# =======================
def ShowData(master,LL):
# ======================================
''' Shows data from a Lagrangian Trajectory'''
__version__ = "1.0"
__author__ = "Quim Ballabrerera"
__date__ = "January 2018"
def iselection(LL):
# =================
log = tk.Text(master)
log.grid(row=0,column=0,padx=10,pady=10,sticky='nsew')
log.grid_columnconfigure(0,weight=1)
log.grid_rowconfigure(0,weight=1)
# Scrollbar
scrollb = tk.Scrollbar(master,command=log.yview)
scrollb.grid(row=0,column=1,sticky='nsew',padx=2,pady=2)
log['yscrollcommand'] = scrollb.set
if LL.SOURCE == 'blm':
if LL.nfloats == 1:
for l in range(LL.nrecords):
string = '\t {} \t {: 7.3f} \t {: 7.3f} \t {} \n'.format(l, \
LL.lon[l], \
LL.lat[l], \
LL.DATE[l])
log.insert('end',string)
else:
i = int(LL.I.get())
for l in range(LL.nrecords):
string = '\t {} \t {: 7.3f} \t {: 7.3f} \t {} \n'.format(l, \
LL.lon[l][i], \
LL.lat[l][i], \
LL.DATE[l])
log.insert('end',string)
elif LL.SOURCE == 'mlm':
i = int(LL.I.get())
for l in range(LL.nrecords):
string = '\t {} \t {: 7.3f} \t {: 7.3f} \t {} \n'.format(l, \
LL.lon[l][i], \
LL.lat[l][i], \
LL.DATE[l][i])
log.insert('end',string)
iselection(LL)
F0 = ttk.Frame(master)
ttk.Label(F0,text='Floater:').grid(row=0,column=0,padx=3)
ibox = ttk.Combobox(F0,textvariable=LL.I,width=5)
ibox.grid(row=0,column=1)
ibox.bind('<<ComboboxSelected>>',lambda e: iselection(LL))
ibox['values'] = list(range(LL.nfloats))
if LL.nfloats == 1:
ibox.configure(state='disabled')
else:
ibox.configure(state='!disabled')
F0.grid()
# =============
def editor(LL):
# =============
global Deploy_date
global Recover_date
global REJECT
global Station_pointer
global NRECORDS
BACKUP_lon = LL.lon.copy()
BACKUP_lat = LL.lat.copy()
BACKUP_date = LL.DATE.copy()
REJECT = []
for i in range(LL.nrecords):
REJECT.append(tk.BooleanVar(value=False))
Deploy_date = tk.StringVar()
Recover_date = tk.StringVar()
Station_pointer = tk.IntVar()
NRECORDS = tk.IntVar()
Deploy_date.set(LL.DATE[0].__str__())
Recover_date.set(LL.DATE[LL.nrecords-1].__str__())
Station_pointer.set(0)
NRECORDS.set(LL.nrecords)
def _close():
# ===========
win.destroy()
def _cancel():
# ===========
LL.lon = BACKUP_lon.copy()
LL.lat = BACKUP_lat.copy()
LL.DATE = BACKUP_date.copy()
win.destroy()
def _deploytime():
# ================
''' Reject positions before the specified date'''
print('In deploy date: ', Deploy_date.get())
t0 = dparser.parse(Deploy_date.get())
for i in range(LL.nrecords):
if LL.DATE[i] < t0:
REJECT[i].set(True)
def _recovertime():
# =================
''' Reject positions after the specified date'''
print('In recover date: ', Recover_date.get())
t0 = dparser.parse(Recover_date.get())
for i in range(LL.nrecords):
if LL.DATE[i] > t0:
REJECT[i].set(True)
def _purge():
# =================
''' Purge rejected positions'''
def _station_up():
# =================
    ''' Move the station pointer one record up'''
def _station_down():
# =================
    ''' Move the station pointer one record down'''
def _reject_before():
# =================
    ''' Reject all positions before the current one'''
def _reject_after():
# =================
    ''' Reject all positions after the current one'''
def _reject_this():
# =================
''' Reject a position'''
def _save():
# =================
''' Save valid positions'''
win = tk.Toplevel()
win.title('Trajectory Editor')
win.resizable(False,False)
win.protocol('WM_DELETE_WINDOW',_cancel)
#global Deploy_date
#global Recover_date
# Define tabs:
nb = ttk.Notebook(win)
page1 = ttk.Frame(nb)
page2 = ttk.Frame(nb)
nb.add(page1,text='Canvas')
nb.add(page2,text='Attributes')
F0 = ttk.Frame(page1,padding=5)
ttk.Label(F0,text='Filename',padding=5).grid(row=0,column=0,sticky='w')
ttk.Entry(F0,textvariable=LL.FILENAME,justify='left',width=80).\
grid(row=0,column=1,columnspan=8,sticky='ew')
F0.grid()
F1 = ttk.Frame(page1,padding=5)
ttk.Label(F1,text='Deployment date = ').grid(row=0,column=0,
columnspan=2,
padx=3,sticky='w')
wdeploy = ttk.Entry(F1,textvariable=Deploy_date,
justify='left',width=18)
wdeploy.grid(row=0,column=2,columnspan=2,sticky='w')
wdeploy.bind("<Return>", lambda f: _deploytime())
ttk.Label(F1,text='Recovery date = ').grid(row=0,column=4,
columnspan=2,
padx=3,sticky='w')
wrecover = ttk.Entry(F1,textvariable=Recover_date,
justify='left',width=18)
wrecover.grid(row=0,column=6,columnspan=2,sticky='w')
wrecover.bind('<Return>', lambda f: _recovertime())
F1.grid(sticky='w')
fig = Figure(dpi=150)
ax1 = fig.add_subplot(111)
canvas = FigureCanvasTkAgg(fig,master=page1)
  canvas.draw()   # FigureCanvasTkAgg.show() was removed; draw() replaces it
canvas.get_tk_widget().grid(sticky='nsew')
canvas._tkcanvas.grid()
ax1.get_xaxis().set_visible(False)
ax1.get_yaxis().set_visible(False)
# Bottom menu
F2 = ttk.Frame(page1,padding=5)
ttk.Button(F2,text='+',command=_station_up,padding=3). \
grid(row=0,column=0)
ttk.Label(F2,text='station = ',padding=3).grid(row=0,column=1)
wstat = ttk.Entry(F2,textvariable=Station_pointer)
wstat.grid(row=0,column=2,columnspan=2)
wstat.bind('<Return>', lambda f: station_manual())
ttk.Label(F2,text='/ ',padding=3).grid(row=0,column=4)
ttk.Entry(F2,textvariable=NRECORDS,
state='readonly').grid(row=0,column=5,columnspan=2)
ttk.Checkbutton(F2,text='Reject',command=_reject_this, \
variable=REJECT[Station_pointer.get()]).grid(row=0,column=7)
ttk.Button(F2,text='Reject stations before this', \
command=_reject_before).grid(row=0,column=8,columnspan=2)
ttk.Button(F2,text='Reject stations after this', \
command=_reject_after).grid(row=0,column=10,columnspan=2)
ttk.Button(F2,text='-',command=_station_down,padding=3). \
grid(row=1,column=0)
ttk.Label(F2,text='Date = ',padding=3).grid(row=1,column=1)
  # ttk.Entry has no "value" option; leave the field empty for now.
  ttk.Entry(F2).grid(row=1,column=2,columnspan=2)
#ttk.Entry(F2,textvariable=Station_date).grid(row=1,column=2,columnspan=2)
#ttk.Label(F2,text='Longitude = ',padding=3).grid(row=1,column=4)
#ttk.Entry(F2,textvariable=Station_lon).grid(row=1,column=5,columnspan=2)
#ttk.Label(F2,text='Latitude = ',padding=3).grid(row=1,column=7)
#ttk.Entry(F2,textvariable=Station_lat).grid(row=1,column=8,columnspan=2)
#ttk.Label(F2,text='Speed = ',padding=3).grid(row=1,column=10)
#ttk.Entry(F2,textvariable=Station_speed).grid(row=1,column=11,columnspan=2)
ttk.Button(F2,text='Purge',command=_purge).grid(row=2,column=10)
ttk.Button(F2,text='Save as',command=_save).grid(row=2,column=11)
ttk.Button(F2,text='Close',command=_close).grid(row=2,column=12)
F2.grid(sticky='ew')
nb.grid()
#win.wait_window(win)
def main():
# =========
root = tk.Tk()
nn = filedialog.askopenfile()
if nn is None:
quit()
filename = '%s' % nn.name
#filename = 'trajectory_20171122.nc'
#filename = 'histo-300234060640350.txt'
root.title(filename)
root.resizable(width=False,height=False)
root.rowconfigure(0,weight=1)
tr = parameters()
tr.Read(filename)
ShowData(root,tr)
root.mainloop()
if __name__ == '__main__':
main()
|
quimbp/cosmo
|
modules/cosmo/lagrangian.py
|
Python
|
mit
| 28,012
|
[
"NetCDF"
] |
48f3093a1733cd2076c96daf45798bf31e6c1a3ce024a88194d6466cd9daba23
|
# -*- coding: UTF-8 -*-
# Copyright (C) 2006, 2010 Hervé Cauwelier <herve@oursours.net>
# Copyright (C) 2006-2007, 2009-2011 J. David Ibáñez <jdavid.ibp@gmail.com>
# Copyright (C) 2007 Sylvain Taverne <taverne.sylvain@gmail.com>
# Copyright (C) 2009 David Versmisse <versmisse@lil.univ-littoral.fr>
# Copyright (C) 2009 Dumont Sébastien <sebastien.dumont@itaapy.com>
# Copyright (C) 2016 Sylvain Taverne <taverne.sylvain@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import from standard library
from copy import deepcopy
from os.path import islink, exists, isdir
from subprocess import Popen
from json import dumps
# Import from itools
from itools.fs import lfs
from itools.gettext import POFile
from itools.html import XHTMLFile, HTMLFile
from itools.xmlfile.errors import TranslationError
# Import from here
from build_gulp import GulpBuilder
from git import open_worktree
def get_manifest():
worktree = open_worktree('.')
return [ x for x in worktree.get_filenames() if not x.startswith('.')]
def make(worktree, rules, manifest, package_root, po_files):
for source in deepcopy(manifest):
# Exclude
if 'docs/' in source:
continue
# Apply rules
for source_ext, target_ext, f, handler_cls in rules:
if source.endswith(source_ext):
target = source[:-len(source_ext)] + target_ext
print(target)
# Compile
f(package_root, source, target, handler_cls, po_files)
# Update manifest
manifest.add(target)
# PO => MO
def po2mo(package_root, source, target, handler_cls, po_files):
Popen(['msgfmt', source, '-o', target])
# Translate templates
def make_template(package_root, source, target, handler_cls, po_files):
# Get file
source_handler = handler_cls(source)
language = target.rsplit('.', 1)[1]
po = po_files[language]
try:
data = source_handler.translate(po)
except TranslationError as e:
# Override source and language
raise TranslationError(line=e.line, source_file=source, language=language)
with open(target, 'w') as f:
f.write(data)
def get_file_path(package_root, filename):
if package_root == '.':
return filename
return package_root + '/' + filename
def get_package_version(package_root):
path = get_file_path(package_root, 'version.txt')
if exists(path):
version = open(path).read().strip()
else:
version = None
return version
def make_version(worktree):
"""This function finds out the version number from the source, this will
be written to the 'version.txt' file, which will be read once the software
is installed to get the version number.
"""
# Get the git description
tag = None
description = worktree.git_describe()
# The version name
if description:
        # n represents the number of commits between the tag and the ref
tag, n, commit = description
if n == 0:
# Exact match
return tag
# Try to get the branch
branch = worktree.get_branch_name()
branch = branch or 'nobranch'
if tag and tag.startswith(branch):
branch = tag
# Get the timestamp
try:
head = worktree.get_metadata()
timestamp = head['committer_date']
timestamp = timestamp.strftime('%Y%m%d%H%M')
except KeyError:
# XXX bug in docker ?
timestamp = 'notimestamp'
# Build a version from the branch and the timestamp
return '{}.dev{}'.format(branch, timestamp)
def build(path, config, environment):
# Get version path
package_root = config.get_value('package_root')
version_txt = get_file_path(package_root, 'version.txt')
# Get git worktree
try:
worktree = open_worktree(path)
except KeyError:
worktree = None
    # If not in a git repository, get the package version
if worktree is None:
return get_package_version(package_root)
# Find out the version string
version = make_version(worktree)
# Initialize the manifest file (ignore links & submodules)
manifest = set([ x for x in get_manifest() if not islink(x) and not isdir(x)])
manifest.add('MANIFEST')
# Write version
open(path + version_txt, 'w').write(version)
print("**"*30)
print("* Version: {}".format(version))
manifest.add(version_txt)
# Write environment.json file
environment_json = get_file_path(package_root, 'environment.json')
environment_kw = {'build_path': path, 'environment': environment}
open(path + environment_json, 'w').write(dumps(environment_kw))
manifest.add(environment_json)
print("* Build environment.json")
# Run gulp
if environment == 'production':
gulp_builder = GulpBuilder(package_root, worktree, manifest)
gulp_builder.run()
# Rules
rules = [('.po', '.mo', po2mo, None)]
# Pre-load PO files
po_files = {}
for dst_lang in config.get_value('target_languages'):
po = POFile('%s/locale/%s.po' % (package_root, dst_lang))
po_files[dst_lang] = po
# Templates
src_lang = config.get_value('source_language', default='en')
for dst_lang in config.get_value('target_languages'):
rules.append(
('.xml.%s' % src_lang, '.xml.%s' % dst_lang, make_template, XHTMLFile))
rules.append(
('.xhtml.%s' % src_lang, '.xhtml.%s' % dst_lang, make_template, XHTMLFile))
rules.append(
('.html.%s' % src_lang, '.html.%s' % dst_lang, make_template, HTMLFile))
# Make
make(worktree, rules, manifest, package_root, po_files)
# Write the manifest
lines = [ x + '\n' for x in sorted(manifest) ]
open(path + 'MANIFEST', 'w').write(''.join(lines))
print('* Build MANIFEST file (list of files to install)')
print('**'*30)
return version
|
bepatient-fr/itools
|
itools/pkg/build.py
|
Python
|
gpl-3.0
| 6,574
|
[
"GULP"
] |
7b27c8d9aa57633c6fed4e6e6b8834382071958a30d59cabc5a9ec224a912ea6
|
"""
Numerical python functions written for compatibility with MATLAB
commands with the same names.
MATLAB compatible functions
-------------------------------
:func:`cohere`
Coherence (normalized cross spectral density)
:func:`csd`
    Cross spectral density using Welch's average periodogram
:func:`detrend`
Remove the mean or best fit line from an array
:func:`find`
Return the indices where some condition is true;
numpy.nonzero is similar but more general.
:func:`griddata`
Interpolate irregularly distributed data to a
regular grid.
:func:`prctile`
Find the percentiles of a sequence
:func:`prepca`
Principal Component Analysis
:func:`psd`
    Power spectral density using Welch's average periodogram
:func:`rk4`
    A 4th-order Runge-Kutta integrator for 1D or ND systems
:func:`specgram`
Spectrogram (spectrum over segments of time)
Miscellaneous functions
-------------------------
Functions that don't exist in MATLAB, but are useful anyway:
:func:`cohere_pairs`
Coherence over all pairs. This is not a MATLAB function, but we
compute coherence a lot in my lab, and we compute it for a lot of
pairs. This function is optimized to do this efficiently by
caching the direct FFTs.
:func:`rk4`
A 4th order Runge-Kutta ODE integrator in case you ever find
yourself stranded without scipy (and the far superior
scipy.integrate tools)
:func:`contiguous_regions`
Return the indices of the regions spanned by some logical mask
:func:`cross_from_below`
Return the indices where a 1D array crosses a threshold from below
:func:`cross_from_above`
Return the indices where a 1D array crosses a threshold from above
:func:`complex_spectrum`
Return the complex-valued frequency spectrum of a signal
:func:`magnitude_spectrum`
Return the magnitude of the frequency spectrum of a signal
:func:`angle_spectrum`
Return the angle (wrapped phase) of the frequency spectrum of a signal
:func:`phase_spectrum`
Return the phase (unwrapped angle) of the frequency spectrum of a signal
:func:`detrend_mean`
Remove the mean from a line.
:func:`demean`
    Remove the mean from a line. This function is the same as
    :func:`detrend_mean` except for the default *axis*.
:func:`detrend_linear`
Remove the best fit line from a line.
:func:`detrend_none`
Return the original line.
:func:`stride_windows`
Get all windows in an array in a memory-efficient manner
:func:`stride_repeat`
Repeat an array in a memory-efficient manner
:func:`apply_window`
Apply a window along a given axis
record array helper functions
-------------------------------
A collection of helper methods for numpy record arrays
.. _htmlonly:
See :ref:`misc-examples-index`
:func:`rec2txt`
Pretty print a record array
:func:`rec2csv`
Store record array in CSV file
:func:`csv2rec`
Import record array from CSV file with type inspection
:func:`rec_append_fields`
Adds field(s)/array(s) to record array
:func:`rec_drop_fields`
Drop fields from record array
:func:`rec_join`
Join two record arrays on sequence of fields
:func:`recs_join`
A simple join of multiple recarrays using a single column as a key
:func:`rec_groupby`
Summarize data by groups (similar to SQL GROUP BY)
:func:`rec_summarize`
Helper code to filter rec array fields into new fields
For the rec viewer functions (e.g. rec2csv), there are a bunch of Format
objects you can pass into the functions that will do things like color
negative values red, set percent formatting and scaling, etc.
Example usage::
r = csv2rec('somefile.csv', checkrows=0)
formatd = dict(
weight = FormatFloat(2),
change = FormatPercent(2),
cost = FormatThousands(2),
)
rec2excel(r, 'test.xls', formatd=formatd)
rec2csv(r, 'test.csv', formatd=formatd)
scroll = rec2gtk(r, formatd=formatd)
win = gtk.Window()
win.set_size_request(600,800)
win.add(scroll)
win.show_all()
gtk.main()
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import map, xrange, zip
import copy
import csv
import operator
import os
import warnings
import numpy as np
from matplotlib import verbose
import matplotlib.cbook as cbook
from matplotlib import docstring
from matplotlib.path import Path
import math
ma = np.ma
if six.PY3:
long = int
def logspace(xmin, xmax, N):
'''
Return N values logarithmically spaced between xmin and xmax.
Call signature::
logspace(xmin, xmax, N)
'''
return np.exp(np.linspace(np.log(xmin), np.log(xmax), N))
def _norm(x):
'''
Return sqrt(x dot x).
Call signature::
_norm(x)
'''
return np.sqrt(np.dot(x, x))
def window_hanning(x):
'''
Return x times the hanning window of len(x).
Call signature::
window_hanning(x)
.. seealso::
:func:`window_none`
:func:`window_none` is another window algorithm.
'''
return np.hanning(len(x))*x
def window_none(x):
'''
No window function; simply return x.
Call signature::
window_none(x)
.. seealso::
:func:`window_hanning`
:func:`window_hanning` is another window algorithm.
'''
return x
def apply_window(x, window, axis=0, return_window=None):
'''
Apply the given window to the given 1D or 2D array along the given axis.
Call signature::
apply_window(x, window, axis=0, return_window=False)
*x*: 1D or 2D array or sequence
Array or sequence containing the data.
    *window*: function or array.
Either a function to generate a window or an array with length
*x*.shape[*axis*]
*axis*: integer
The axis over which to do the repetition.
Must be 0 or 1. The default is 0
*return_window*: bool
If true, also return the 1D values of the window that was applied
'''
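    # Illustrative sketch (values assumed, not part of the original file):
    # apply_window(np.ones(4), window_hanning) returns np.hanning(4) itself,
    # since the input is all ones; with return_window=True the same window
    # vector is returned alongside the result.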
x = np.asarray(x)
if x.ndim < 1 or x.ndim > 2:
raise ValueError('only 1D or 2D arrays can be used')
if axis+1 > x.ndim:
raise ValueError('axis(=%s) out of bounds' % axis)
xshape = list(x.shape)
xshapetarg = xshape.pop(axis)
if cbook.iterable(window):
if len(window) != xshapetarg:
raise ValueError('The len(window) must be the same as the shape '
'of x for the chosen axis')
windowVals = window
else:
windowVals = window(np.ones(xshapetarg, dtype=x.dtype))
if x.ndim == 1:
if return_window:
return windowVals * x, windowVals
else:
return windowVals * x
xshapeother = xshape.pop()
otheraxis = (axis+1) % 2
windowValsRep = stride_repeat(windowVals, xshapeother, axis=otheraxis)
if return_window:
return windowValsRep * x, windowVals
else:
return windowValsRep * x
def detrend(x, key=None, axis=None):
'''
Return x with its trend removed.
Call signature::
detrend(x, key='mean')
*x*: array or sequence
Array or sequence containing the data.
*key*: [ 'default' | 'constant' | 'mean' | 'linear' | 'none'] or function
Specifies the detrend algorithm to use. 'default' is 'mean',
        which is the same as :func:`detrend_mean`. 'constant' is equivalent to 'mean'.
'linear' is the same as :func:`detrend_linear`. 'none' is the same
as :func:`detrend_none`. The default is 'mean'. See the
corresponding functions for more details regarding the algorithms.
Can also be a function that carries out the detrend operation.
*axis*: integer
The axis along which to do the detrending.
.. seealso::
:func:`detrend_mean`
:func:`detrend_mean` implements the 'mean' algorithm.
:func:`detrend_linear`
:func:`detrend_linear` implements the 'linear' algorithm.
:func:`detrend_none`
:func:`detrend_none` implements the 'none' algorithm.
'''
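    # Illustrative sketch (values assumed): detrend(np.array([1., 2., 3.]),
    # key='linear') subtracts the best-fit line and returns values that are
    # numerically zero; key='mean' would instead return [-1., 0., 1.].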
if key is None or key in ['constant', 'mean', 'default']:
return detrend(x, key=detrend_mean, axis=axis)
elif key == 'linear':
return detrend(x, key=detrend_linear, axis=axis)
elif key == 'none':
return detrend(x, key=detrend_none, axis=axis)
elif cbook.is_string_like(key):
raise ValueError("Unknown value for key %s, must be one of: "
"'default', 'constant', 'mean', "
"'linear', or a function" % key)
if not callable(key):
raise ValueError("Unknown value for key %s, must be one of: "
"'default', 'constant', 'mean', "
"'linear', or a function" % key)
x = np.asarray(x)
if axis is not None and axis+1 > x.ndim:
raise ValueError('axis(=%s) out of bounds' % axis)
if (axis is None and x.ndim == 0) or (not axis and x.ndim == 1):
return key(x)
# try to use the 'axis' argument if the function supports it,
# otherwise use apply_along_axis to do it
try:
return key(x, axis=axis)
except TypeError:
return np.apply_along_axis(key, axis=axis, arr=x)
def demean(x, axis=0):
'''
Return x minus its mean along the specified axis.
Call signature::
demean(x, axis=0)
*x*: array or sequence
Array or sequence containing the data
Can have any dimensionality
*axis*: integer
The axis along which to take the mean. See numpy.mean for a
description of this argument.
.. seealso::
:func:`delinear`
:func:`denone`
:func:`delinear` and :func:`denone` are other detrend algorithms.
:func:`detrend_mean`
            This function is the same as :func:`detrend_mean` except
            for the default *axis*.
'''
return detrend_mean(x, axis=axis)
def detrend_mean(x, axis=None):
'''
Return x minus the mean(x).
Call signature::
detrend_mean(x, axis=None)
*x*: array or sequence
Array or sequence containing the data
Can have any dimensionality
*axis*: integer
The axis along which to take the mean. See numpy.mean for a
description of this argument.
.. seealso::
:func:`demean`
            This function is the same as :func:`demean` except
for the default *axis*.
:func:`detrend_linear`
:func:`detrend_none`
:func:`detrend_linear` and :func:`detrend_none` are other
detrend algorithms.
:func:`detrend`
:func:`detrend` is a wrapper around all the detrend algorithms.
'''
x = np.asarray(x)
if axis is not None and axis+1 > x.ndim:
raise ValueError('axis(=%s) out of bounds' % axis)
# short-circuit 0-D array.
if not x.ndim:
return np.array(0., dtype=x.dtype)
# short-circuit simple operations
if axis == 0 or axis is None or x.ndim <= 1:
return x - x.mean(axis)
ind = [slice(None)] * x.ndim
ind[axis] = np.newaxis
return x - x.mean(axis)[ind]
def detrend_none(x, axis=None):
'''
Return x: no detrending.
Call signature::
detrend_none(x, axis=None)
*x*: any object
An object containing the data
*axis*: integer
This parameter is ignored.
It is included for compatibility with detrend_mean
.. seealso::
:func:`denone`
            This function is the same as :func:`denone` except
for the default *axis*, which has no effect.
:func:`detrend_mean`
:func:`detrend_linear`
:func:`detrend_mean` and :func:`detrend_linear` are other
detrend algorithms.
:func:`detrend`
:func:`detrend` is a wrapper around all the detrend algorithms.
'''
return x
def detrend_linear(y):
'''
Return x minus best fit line; 'linear' detrending.
Call signature::
detrend_linear(y)
    *y*: 0-D or 1-D array or sequence
        Array or sequence containing the data
.. seealso::
:func:`delinear`
            This function is the same as :func:`delinear` except
for the default *axis*.
:func:`detrend_mean`
:func:`detrend_none`
:func:`detrend_mean` and :func:`detrend_none` are other
detrend algorithms.
:func:`detrend`
:func:`detrend` is a wrapper around all the detrend algorithms.
'''
# This is faster than an algorithm based on linalg.lstsq.
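    # A minimal sketch of the algebra used below: the least-squares slope is
    # b = Cov(x, y) / Var(x) and the intercept is a = mean(y) - b * mean(x),
    # which is exactly what the biased covariance matrix C evaluates.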
y = np.asarray(y)
if y.ndim > 1:
raise ValueError('y cannot have ndim > 1')
# short-circuit 0-D array.
if not y.ndim:
return np.array(0., dtype=y.dtype)
x = np.arange(y.size, dtype=np.float_)
C = np.cov(x, y, bias=1)
b = C[0, 1]/C[0, 0]
a = y.mean() - b*x.mean()
return y - (b*x + a)
def stride_windows(x, n, noverlap=None, axis=0):
'''
Get all windows of x with length n as a single array,
using strides to avoid data duplication.
.. warning::
It is not safe to write to the output array. Multiple
elements may point to the same piece of memory,
so modifying one value may change others.
Call signature::
stride_windows(x, n, noverlap=0)
*x*: 1D array or sequence
Array or sequence containing the data.
*n*: integer
The number of data points in each window.
*noverlap*: integer
The overlap between adjacent windows.
Default is 0 (no overlap)
*axis*: integer
The axis along which the windows will run.
Refs:
    `stackoverflow: Rolling window for 1D arrays in Numpy?
    <http://stackoverflow.com/a/6811241>`_
    `stackoverflow: Using strides for an efficient moving average filter
    <http://stackoverflow.com/a/4947453>`_
'''
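    # Illustrative sketch (values assumed, not part of the original file):
    # stride_windows(np.arange(6), n=4, noverlap=2) returns the 4x2 view
    #   [[0, 2],
    #    [1, 3],
    #    [2, 4],
    #    [3, 5]]
    # where each column is one window and all columns share memory with x.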
if noverlap is None:
noverlap = 0
if noverlap >= n:
raise ValueError('noverlap must be less than n')
if n < 1:
raise ValueError('n cannot be less than 1')
x = np.asarray(x)
if x.ndim != 1:
raise ValueError('only 1-dimensional arrays can be used')
if n == 1 and noverlap == 0:
if axis == 0:
return x[np.newaxis]
else:
return x[np.newaxis].transpose()
if n > x.size:
raise ValueError('n cannot be greater than the length of x')
# np.lib.stride_tricks.as_strided easily leads to memory corruption for
# non integer shape and strides, i.e. noverlap or n. See #3845.
noverlap = int(noverlap)
n = int(n)
step = n - noverlap
if axis == 0:
shape = (n, (x.shape[-1]-noverlap)//step)
strides = (x.strides[0], step*x.strides[0])
else:
shape = ((x.shape[-1]-noverlap)//step, n)
strides = (step*x.strides[0], x.strides[0])
return np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
def stride_repeat(x, n, axis=0):
'''
Repeat the values in an array in a memory-efficient manner. Array x is
stacked vertically n times.
.. warning::
It is not safe to write to the output array. Multiple
elements may point to the same piece of memory, so
modifying one value may change others.
Call signature::
stride_repeat(x, n, axis=0)
*x*: 1D array or sequence
Array or sequence containing the data.
*n*: integer
        The number of times to repeat the array.
*axis*: integer
The axis along which the data will run.
Refs:
    `stackoverflow: Repeat NumPy array without replicating data?
    <http://stackoverflow.com/a/5568169>`_
'''
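    # Illustrative sketch (values assumed): stride_repeat(np.arange(3), 2)
    # returns [[0, 1, 2], [0, 1, 2]] without copying the underlying data.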
if axis not in [0, 1]:
raise ValueError('axis must be 0 or 1')
x = np.asarray(x)
if x.ndim != 1:
raise ValueError('only 1-dimensional arrays can be used')
if n == 1:
if axis == 0:
return np.atleast_2d(x)
else:
return np.atleast_2d(x).T
if n < 1:
raise ValueError('n cannot be less than 1')
# np.lib.stride_tricks.as_strided easily leads to memory corruption for
# non integer shape and strides, i.e. n. See #3845.
n = int(n)
if axis == 0:
shape = (n, x.size)
strides = (0, x.strides[0])
else:
shape = (x.size, n)
strides = (x.strides[0], 0)
return np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
def _spectral_helper(x, y=None, NFFT=None, Fs=None, detrend_func=None,
window=None, noverlap=None, pad_to=None,
sides=None, scale_by_freq=None, mode=None):
'''
This is a helper function that implements the commonality between the
psd, csd, spectrogram and complex, magnitude, angle, and phase spectrums.
It is *NOT* meant to be used outside of mlab and may change at any time.
'''
if y is None:
# if y is None use x for y
same_data = True
else:
# The checks for if y is x are so that we can use the same function to
# implement the core of psd(), csd(), and spectrogram() without doing
# extra calculations. We return the unaveraged Pxy, freqs, and t.
same_data = y is x
if Fs is None:
Fs = 2
if noverlap is None:
noverlap = 0
if detrend_func is None:
detrend_func = detrend_none
if window is None:
window = window_hanning
    # if NFFT is set to None, fall back to the default segment length
    if NFFT is None:
        NFFT = 256
if mode is None or mode == 'default':
mode = 'psd'
elif mode not in ['psd', 'complex', 'magnitude', 'angle', 'phase']:
raise ValueError("Unknown value for mode %s, must be one of: "
"'default', 'psd', 'complex', "
"'magnitude', 'angle', 'phase'" % mode)
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is not 'psd'")
# Make sure we're dealing with a numpy array. If y and x were the same
# object to start with, keep them that way
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
if sides is None or sides == 'default':
if np.iscomplexobj(x):
sides = 'twosided'
else:
sides = 'onesided'
elif sides not in ['onesided', 'twosided']:
raise ValueError("Unknown value for sides %s, must be one of: "
"'default', 'onesided', or 'twosided'" % sides)
# zero pad x and y up to NFFT if they are shorter than NFFT
if len(x) < NFFT:
n = len(x)
x = np.resize(x, (NFFT,))
x[n:] = 0
if not same_data and len(y) < NFFT:
n = len(y)
y = np.resize(y, (NFFT,))
y[n:] = 0
if pad_to is None:
pad_to = NFFT
if mode != 'psd':
scale_by_freq = False
elif scale_by_freq is None:
scale_by_freq = True
# For real x, ignore the negative frequencies unless told otherwise
if sides == 'twosided':
numFreqs = pad_to
if pad_to % 2:
freqcenter = (pad_to - 1)//2 + 1
else:
freqcenter = pad_to//2
scaling_factor = 1.
elif sides == 'onesided':
if pad_to % 2:
numFreqs = (pad_to + 1)//2
else:
numFreqs = pad_to//2 + 1
scaling_factor = 2.
result = stride_windows(x, NFFT, noverlap, axis=0)
result = detrend(result, detrend_func, axis=0)
result, windowVals = apply_window(result, window, axis=0,
return_window=True)
result = np.fft.fft(result, n=pad_to, axis=0)[:numFreqs, :]
freqs = np.fft.fftfreq(pad_to, 1/Fs)[:numFreqs]
if not same_data:
        # if same_data is False, mode must be 'psd'
        resultY = stride_windows(y, NFFT, noverlap)
        # detrend before windowing, so y gets the same treatment as x above
        resultY = detrend(resultY, detrend_func, axis=0)
        resultY = apply_window(resultY, window, axis=0)
resultY = np.fft.fft(resultY, n=pad_to, axis=0)[:numFreqs, :]
result = np.conjugate(result) * resultY
elif mode == 'psd':
result = np.conjugate(result) * result
elif mode == 'magnitude':
result = np.absolute(result)
elif mode == 'angle' or mode == 'phase':
# we unwrap the phase later to handle the onesided vs. twosided case
result = np.angle(result)
elif mode == 'complex':
pass
if mode == 'psd':
# Also include scaling factors for one-sided densities and dividing by
# the sampling frequency, if desired. Scale everything, except the DC
# component and the NFFT/2 component:
# if we have a even number of frequencies, don't scale NFFT/2
if not NFFT % 2:
slc = slice(1, -1, None)
# if we have an odd number, just don't scale DC
else:
slc = slice(1, None, None)
result[slc] *= scaling_factor
# MATLAB divides by the sampling frequency so that density function
# has units of dB/Hz and can be integrated by the plotted frequency
# values. Perform the same scaling here.
if scale_by_freq:
result /= Fs
# Scale the spectrum by the norm of the window to compensate for
# windowing loss; see Bendat & Piersol Sec 11.5.2.
result /= (np.abs(windowVals)**2).sum()
else:
# In this case, preserve power in the segment, not amplitude
result /= np.abs(windowVals).sum()**2
t = np.arange(NFFT/2, len(x) - NFFT/2 + 1, NFFT - noverlap)/Fs
if sides == 'twosided':
# center the frequency range at zero
freqs = np.concatenate((freqs[freqcenter:], freqs[:freqcenter]))
result = np.concatenate((result[freqcenter:, :],
result[:freqcenter, :]), 0)
elif not pad_to % 2:
# get the last value correctly, it is negative otherwise
freqs[-1] *= -1
# we unwrap the phase here to handle the onesided vs. twosided case
if mode == 'phase':
result = np.unwrap(result, axis=0)
return result, freqs, t
def _single_spectrum_helper(x, mode, Fs=None, window=None, pad_to=None,
sides=None):
'''
This is a helper function that implements the commonality between the
complex, magnitude, angle, and phase spectrums.
It is *NOT* meant to be used outside of mlab and may change at any time.
'''
if mode is None or mode == 'psd' or mode == 'default':
raise ValueError('_single_spectrum_helper does not work with %s mode'
% mode)
if pad_to is None:
pad_to = len(x)
spec, freqs, _ = _spectral_helper(x=x, y=None, NFFT=len(x), Fs=Fs,
detrend_func=detrend_none, window=window,
noverlap=0, pad_to=pad_to,
sides=sides,
scale_by_freq=False,
mode=mode)
if mode != 'complex':
spec = spec.real
if len(spec.shape) == 2 and spec.shape[1] == 1:
spec = spec[:, 0]
return spec, freqs
# Split out these keyword docs so that they can be used elsewhere
docstring.interpd.update(Spectral=cbook.dedent("""
Keyword arguments:
*Fs*: scalar
The sampling frequency (samples per time unit). It is used
to calculate the Fourier frequencies, freqs, in cycles per time
unit. The default value is 2.
*window*: callable or ndarray
A function or a vector of length *NFFT*. To create window
vectors see :func:`window_hanning`, :func:`window_none`,
:func:`numpy.blackman`, :func:`numpy.hamming`,
:func:`numpy.bartlett`, :func:`scipy.signal`,
:func:`scipy.signal.get_window`, etc. The default is
:func:`window_hanning`. If a function is passed as the
argument, it must take a data segment as an argument and
return the windowed version of the segment.
*sides*: [ 'default' | 'onesided' | 'twosided' ]
Specifies which sides of the spectrum to return. Default gives the
default behavior, which returns one-sided for real data and both
for complex data. 'onesided' forces the return of a one-sided
spectrum, while 'twosided' forces two-sided.
"""))
docstring.interpd.update(Single_Spectrum=cbook.dedent("""
*pad_to*: integer
The number of points to which the data segment is padded when
performing the FFT. While not increasing the actual resolution of
the spectrum (the minimum distance between resolvable peaks),
this can give more points in the plot, allowing for more
detail. This corresponds to the *n* parameter in the call to fft().
The default is None, which sets *pad_to* equal to the length of the
input signal (i.e. no padding).
"""))
docstring.interpd.update(PSD=cbook.dedent("""
*pad_to*: integer
The number of points to which the data segment is padded when
performing the FFT. This can be different from *NFFT*, which
specifies the number of data points used. While not increasing
the actual resolution of the spectrum (the minimum distance between
resolvable peaks), this can give more points in the plot,
allowing for more detail. This corresponds to the *n* parameter
in the call to fft(). The default is None, which sets *pad_to*
equal to *NFFT*
*NFFT*: integer
The number of data points used in each block for the FFT.
        A power of 2 is most efficient. The default value is 256.
This should *NOT* be used to get zero padding, or the scaling of the
result will be incorrect. Use *pad_to* for this instead.
*detrend*: [ 'default' | 'constant' | 'mean' | 'linear' | 'none'] or
callable
The function applied to each segment before fft-ing,
designed to remove the mean or linear trend. Unlike in
        MATLAB, where the *detrend* parameter is a vector, in
        matplotlib it is a function. The :mod:`~matplotlib.pylab`
module defines :func:`~matplotlib.pylab.detrend_none`,
:func:`~matplotlib.pylab.detrend_mean`, and
:func:`~matplotlib.pylab.detrend_linear`, but you can use
a custom function as well. You can also use a string to choose
one of the functions. 'default', 'constant', and 'mean' call
:func:`~matplotlib.pylab.detrend_mean`. 'linear' calls
:func:`~matplotlib.pylab.detrend_linear`. 'none' calls
:func:`~matplotlib.pylab.detrend_none`.
*scale_by_freq*: boolean
Specifies whether the resulting density values should be scaled
by the scaling frequency, which gives density in units of Hz^-1.
This allows for integration over the returned frequency values.
The default is True for MATLAB compatibility.
"""))
@docstring.dedent_interpd
def psd(x, NFFT=None, Fs=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None):
"""
Compute the power spectral density.
Call signature::
psd(x, NFFT=256, Fs=2, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None)
The power spectral density :math:`P_{xx}` by Welch's average
periodogram method. The vector *x* is divided into *NFFT* length
segments. Each segment is detrended by function *detrend* and
windowed by function *window*. *noverlap* gives the length of
the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
of each segment :math:`i` are averaged to compute :math:`P_{xx}`.
If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(PSD)s
*noverlap*: integer
The number of points of overlap between segments.
The default value is 0 (no overlap).
Returns the tuple (*Pxx*, *freqs*).
*Pxx*: 1-D array
The values for the power spectrum `P_{xx}` (real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *Pxx*
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
.. seealso::
:func:`specgram`
:func:`specgram` differs in the default overlap; in not returning
the mean of the segment periodograms; and in returning the
times of the segments.
:func:`magnitude_spectrum`
:func:`magnitude_spectrum` returns the magnitude spectrum.
:func:`csd`
:func:`csd` returns the spectral density between two signals.
"""
Pxx, freqs = csd(x=x, y=None, NFFT=NFFT, Fs=Fs, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq)
return Pxx.real, freqs
@docstring.dedent_interpd
def csd(x, y, NFFT=None, Fs=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None):
"""
Compute the cross-spectral density.
Call signature::
csd(x, y, NFFT=256, Fs=2, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None)
The cross spectral density :math:`P_{xy}` by Welch's average
periodogram method. The vectors *x* and *y* are divided into
*NFFT* length segments. Each segment is detrended by function
*detrend* and windowed by function *window*. *noverlap* gives
the length of the overlap between segments. The product of
the direct FFTs of *x* and *y* are averaged over each segment
to compute :math:`P_{xy}`, with a scaling to correct for power
loss due to windowing.
If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
padded to *NFFT*.
*x*, *y*: 1-D arrays or sequences
Arrays or sequences containing the data
%(Spectral)s
%(PSD)s
*noverlap*: integer
The number of points of overlap between segments.
The default value is 0 (no overlap).
Returns the tuple (*Pxy*, *freqs*):
*Pxy*: 1-D array
The values for the cross spectrum `P_{xy}` before scaling
(real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *Pxy*
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
.. seealso::
:func:`psd`
:func:`psd` is the equivalent to setting y=x.
"""
if NFFT is None:
NFFT = 256
Pxy, freqs, _ = _spectral_helper(x=x, y=y, NFFT=NFFT, Fs=Fs,
detrend_func=detrend, window=window,
noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq,
mode='psd')
if len(Pxy.shape) == 2:
if Pxy.shape[1] > 1:
Pxy = Pxy.mean(axis=1)
else:
Pxy = Pxy[:, 0]
return Pxy, freqs
@docstring.dedent_interpd
def complex_spectrum(x, Fs=None, window=None, pad_to=None,
sides=None):
"""
Compute the complex-valued frequency spectrum of *x*. Data is padded to a
length of *pad_to* and the windowing function *window* is applied to the
signal.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(Single_Spectrum)s
Returns the tuple (*spectrum*, *freqs*):
*spectrum*: 1-D array
The values for the complex spectrum (complex valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *spectrum*
.. seealso::
:func:`magnitude_spectrum`
:func:`magnitude_spectrum` returns the absolute value of this
function.
:func:`angle_spectrum`
:func:`angle_spectrum` returns the angle of this
function.
:func:`phase_spectrum`
:func:`phase_spectrum` returns the phase (unwrapped angle) of this
function.
:func:`specgram`
:func:`specgram` can return the complex spectrum of segments
within the signal.
"""
return _single_spectrum_helper(x=x, Fs=Fs, window=window, pad_to=pad_to,
sides=sides, mode='complex')
@docstring.dedent_interpd
def magnitude_spectrum(x, Fs=None, window=None, pad_to=None,
sides=None):
"""
Compute the magnitude (absolute value) of the frequency spectrum of
*x*. Data is padded to a length of *pad_to* and the windowing function
*window* is applied to the signal.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(Single_Spectrum)s
Returns the tuple (*spectrum*, *freqs*):
*spectrum*: 1-D array
The values for the magnitude spectrum (real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *spectrum*
.. seealso::
:func:`psd`
:func:`psd` returns the power spectral density.
:func:`complex_spectrum`
This function returns the absolute value of
:func:`complex_spectrum`.
:func:`angle_spectrum`
:func:`angle_spectrum` returns the angles of the corresponding
frequencies.
:func:`phase_spectrum`
:func:`phase_spectrum` returns the phase (unwrapped angle) of the
corresponding frequencies.
:func:`specgram`
:func:`specgram` can return the magnitude spectrum of segments
within the signal.
"""
return _single_spectrum_helper(x=x, Fs=Fs, window=window, pad_to=pad_to,
sides=sides, mode='magnitude')
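# A minimal usage sketch (hypothetical helper, not part of the original
# module): the magnitude spectrum of a pure tone peaks at the tone
# frequency. Sampling rate and tone frequency are illustrative.
def _example_magnitude_spectrum():
    fs = 500.0
    t = np.arange(0.0, 2.0, 1.0 / fs)
    x = np.sin(2 * np.pi * 50.0 * t)
    spec, freqs = magnitude_spectrum(x, Fs=fs)
    # the largest magnitude should occur at (or very near) 50 Hz
    return freqs[np.argmax(spec)]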
@docstring.dedent_interpd
def angle_spectrum(x, Fs=None, window=None, pad_to=None,
sides=None):
"""
Compute the angle of the frequency spectrum (wrapped phase spectrum) of
*x*. Data is padded to a length of *pad_to* and the windowing function
*window* is applied to the signal.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(Single_Spectrum)s
Returns the tuple (*spectrum*, *freqs*):
*spectrum*: 1-D array
The values for the angle spectrum in radians (real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *spectrum*
.. seealso::
:func:`complex_spectrum`
This function returns the angle value of
:func:`complex_spectrum`.
:func:`magnitude_spectrum`
:func:`magnitude_spectrum` returns the magnitudes of the
corresponding frequencies.
:func:`phase_spectrum`
:func:`phase_spectrum` returns the unwrapped version of this
function.
:func:`specgram`
:func:`specgram` can return the angle spectrum of segments
within the signal.
"""
return _single_spectrum_helper(x=x, Fs=Fs, window=window, pad_to=pad_to,
sides=sides, mode='angle')
@docstring.dedent_interpd
def phase_spectrum(x, Fs=None, window=None, pad_to=None,
sides=None):
"""
Compute the phase of the frequency spectrum (unwrapped angle spectrum) of
*x*. Data is padded to a length of *pad_to* and the windowing function
*window* is applied to the signal.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(Single_Spectrum)s
Returns the tuple (*spectrum*, *freqs*):
*spectrum*: 1-D array
The values for the phase spectrum in radians (real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *spectrum*
.. seealso::
:func:`complex_spectrum`
This function returns the angle value of
:func:`complex_spectrum`.
:func:`magnitude_spectrum`
:func:`magnitude_spectrum` returns the magnitudes of the
corresponding frequencies.
:func:`angle_spectrum`
:func:`angle_spectrum` returns the wrapped version of this
function.
:func:`specgram`
:func:`specgram` can return the phase spectrum of segments
within the signal.
"""
return _single_spectrum_helper(x=x, Fs=Fs, window=window, pad_to=pad_to,
sides=sides, mode='phase')
@docstring.dedent_interpd
def specgram(x, NFFT=None, Fs=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None,
mode=None):
"""
Compute a spectrogram.
Call signature::
specgram(x, NFFT=256, Fs=2,detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None, mode='default')
Compute and plot a spectrogram of data in *x*. Data are split into
*NFFT* length segments and the spectrum of each section is
computed. The windowing function *window* is applied to each
segment, and the amount of overlap of each segment is
specified with *noverlap*.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(PSD)s
*mode*: [ 'default' | 'psd' | 'complex' | 'magnitude' |
'angle' | 'phase' ]
What sort of spectrum to use. Default is 'psd', which takes the
power spectral density. 'complex' returns the complex-valued
frequency spectrum. 'magnitude' returns the magnitude spectrum.
'angle' returns the phase spectrum without unwrapping. 'phase'
returns the phase spectrum with unwrapping.
*noverlap*: integer
The number of points of overlap between blocks. The default value
is 128.
Returns the tuple (*spectrum*, *freqs*, *t*):
*spectrum*: 2-D array
columns are the periodograms of successive segments
*freqs*: 1-D array
The frequencies corresponding to the rows in *spectrum*
*t*: 1-D array
The times corresponding to midpoints of segments (i.e., the columns
in *spectrum*).
.. note::
*detrend* and *scale_by_freq* only apply when *mode* is set to
'psd'
.. seealso::
:func:`psd`
:func:`psd` differs in the default overlap; in returning
the mean of the segment periodograms; and in not returning
times.
:func:`complex_spectrum`
A single spectrum, similar to having a single segment when
*mode* is 'complex'.
:func:`magnitude_spectrum`
A single spectrum, similar to having a single segment when
*mode* is 'magnitude'.
:func:`angle_spectrum`
A single spectrum, similar to having a single segment when
*mode* is 'angle'.
:func:`phase_spectrum`
A single spectrum, similar to having a single segment when
*mode* is 'phase'.
"""
if noverlap is None:
noverlap = 128
spec, freqs, t = _spectral_helper(x=x, y=None, NFFT=NFFT, Fs=Fs,
detrend_func=detrend, window=window,
noverlap=noverlap, pad_to=pad_to,
sides=sides,
scale_by_freq=scale_by_freq,
mode=mode)
if mode != 'complex':
spec = spec.real # Needed since helper implements generically
return spec, freqs, t
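# A minimal usage sketch (hypothetical helper, not part of the original
# module): a spectrogram of a signal whose frequency rises with time;
# columns of *spec* are per-segment spectra. The NFFT/noverlap choices
# are illustrative.
def _example_specgram():
    fs = 1000.0
    t = np.arange(0.0, 2.0, 1.0 / fs)
    x = np.sin(2 * np.pi * (50.0 + 25.0 * t) * t)
    spec, freqs, times = specgram(x, NFFT=256, Fs=fs, noverlap=128)
    return spec.shape, freqs.shape, times.shape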
_coh_error = """Coherence is calculated by averaging over *NFFT*
length segments. Your signal is too short for your choice of *NFFT*.
"""
@docstring.dedent_interpd
def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
*x*, *y*: 1-D arrays or sequences
Arrays or sequences containing the data
%(Spectral)s
%(PSD)s
*noverlap*: integer
The number of points of overlap between blocks. The default value
is 0 (no overlap).
The return value is the tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector. For cohere, scaling the
individual densities by the sampling frequency has no effect,
since the factors cancel out.
.. seealso::
:func:`psd` and :func:`csd`
For information about the methods used to compute
:math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
"""
if len(x) < 2 * NFFT:
raise ValueError(_coh_error)
Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Cxy = np.divide(np.absolute(Pxy)**2, Pxx*Pyy)
Cxy.shape = (len(f),)
return Cxy, f
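# A minimal usage sketch (hypothetical helper, not part of the original
# module): coherence is near 1 at frequencies where two signals share
# power and lower elsewhere. The signal must be at least 2*NFFT samples
# long, as enforced above; parameters are illustrative.
def _example_cohere():
    fs = 1000.0
    t = np.arange(0.0, 4.0, 1.0 / fs)
    common = np.sin(2 * np.pi * 100.0 * t)
    x = common + 0.5 * np.random.randn(len(t))
    y = common + 0.5 * np.random.randn(len(t))
    Cxy, f = cohere(x, y, NFFT=256, Fs=fs)
    return Cxy, f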
def donothing_callback(*args):
pass
def cohere_pairs(X, ij, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0,
preferSpeedOverMemory=True,
progressCallback=donothing_callback,
returnPxx=False):
"""
Call signature::
Cxy, Phase, freqs = cohere_pairs( X, ij, ...)
Compute the coherence and phase for all pairs *ij*, in *X*.
*X* is a *numSamples* * *numCols* array
*ij* is a list of tuples. Each tuple is a pair of indexes into
the columns of X for which you want to compute coherence. For
example, if *X* has 64 columns, and you want to compute all
nonredundant pairs, define *ij* as::
ij = []
for i in range(64):
for j in range(i+1,64):
ij.append( (i,j) )
*preferSpeedOverMemory* is an optional bool. Defaults to True. If
False, limits the caching by only making one, rather than two,
complex cache arrays. This is useful if memory becomes critical.
Even when *preferSpeedOverMemory* is False, :func:`cohere_pairs`
will still give significant performance gains over calling
:func:`cohere` for each pair, and will use substantially less
memory than if *preferSpeedOverMemory* is True. In my tests with
a 43000-by-64 array over all nonredundant pairs,
*preferSpeedOverMemory* = True delivered a 33% performance boost
on a 1.7GHz Athlon with 512MB RAM compared with
*preferSpeedOverMemory* = False. But both solutions were more
than 10x faster than naively crunching all possible pairs through
:func:`cohere`.
Returns::
(Cxy, Phase, freqs)
where:
- *Cxy*: dictionary of (*i*, *j*) tuples -> coherence vector for
that pair. i.e., ``Cxy[(i,j)] = cohere(X[:,i], X[:,j])``.
Number of dictionary keys is ``len(ij)``.
- *Phase*: dictionary of phases of the cross spectral density at
each frequency for each pair. Keys are (*i*, *j*).
- *freqs*: vector of frequencies, equal in length to either the
coherence or phase vectors for any (*i*, *j*) key.
e.g., to make a coherence Bode plot::
subplot(211)
plot( freqs, Cxy[(12,19)])
subplot(212)
plot( freqs, Phase[(12,19)])
For a large number of pairs, :func:`cohere_pairs` can be much more
efficient than just calling :func:`cohere` for each pair, because
it caches most of the intensive computations. If :math:`N` is the
number of pairs, this function is :math:`O(N)` for most of the
heavy lifting, whereas calling cohere for each pair is
:math:`O(N^2)`. However, because of the caching, it is also more
memory intensive, making 2 additional complex arrays with
approximately the same number of elements as *X*.
See :file:`test/cohere_pairs_test.py` in the src tree for an
example script that shows that this :func:`cohere_pairs` and
:func:`cohere` give the same results for a given pair.
.. seealso::
:func:`psd`
For information about the methods used to compute
:math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
"""
numRows, numCols = X.shape
# zero pad if X is too short
if numRows < NFFT:
tmp = X
X = np.zeros((NFFT, numCols), X.dtype)
X[:numRows, :] = tmp
del tmp
numRows, numCols = X.shape
# get all the columns of X that we are interested in by checking
# the ij tuples
allColumns = set()
for i, j in ij:
allColumns.add(i)
allColumns.add(j)
Ncols = len(allColumns)
# for real X, ignore the negative frequencies
if np.iscomplexobj(X):
numFreqs = NFFT
else:
numFreqs = NFFT//2+1
# cache the FFT of every windowed, detrended NFFT length segment
# of every channel. If preferSpeedOverMemory, cache the conjugate
# as well
if cbook.iterable(window):
if len(window) != NFFT:
raise ValueError("The length of the window must be equal to NFFT")
windowVals = window
else:
windowVals = window(np.ones(NFFT, X.dtype))
ind = list(xrange(0, numRows-NFFT+1, NFFT-noverlap))
numSlices = len(ind)
FFTSlices = {}
FFTConjSlices = {}
Pxx = {}
slices = range(numSlices)
normVal = np.linalg.norm(windowVals)**2
for iColNum, iCol in enumerate(allColumns):
    progressCallback(iColNum / Ncols, 'Caching FFTs')
Slices = np.zeros((numSlices, numFreqs), dtype=np.complex_)
for iSlice in slices:
thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol]
thisSlice = windowVals*detrend(thisSlice)
Slices[iSlice, :] = np.fft.fft(thisSlice)[:numFreqs]
FFTSlices[iCol] = Slices
if preferSpeedOverMemory:
FFTConjSlices[iCol] = np.conjugate(Slices)
Pxx[iCol] = np.divide(np.mean(abs(Slices)**2, axis=0), normVal)
del Slices, ind, windowVals
# compute the coherences and phases for all pairs using the
# cached FFTs
Cxy = {}
Phase = {}
count = 0
N = len(ij)
for i, j in ij:
count += 1
if count % 10 == 0:
progressCallback(count/N, 'Computing coherences')
if preferSpeedOverMemory:
Pxy = FFTSlices[i] * FFTConjSlices[j]
else:
Pxy = FFTSlices[i] * np.conjugate(FFTSlices[j])
if numSlices > 1:
Pxy = np.mean(Pxy, axis=0)
# Pxy = np.divide(Pxy, normVal)
Pxy /= normVal
# Cxy[(i,j)] = np.divide(np.absolute(Pxy)**2, Pxx[i]*Pxx[j])
Cxy[i, j] = abs(Pxy)**2 / (Pxx[i]*Pxx[j])
Phase[i, j] = np.arctan2(Pxy.imag, Pxy.real)
freqs = Fs/NFFT*np.arange(numFreqs)
if returnPxx:
return Cxy, Phase, freqs, Pxx
else:
return Cxy, Phase, freqs
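# A minimal usage sketch (hypothetical helper, not part of the original
# module): coherence and phase for all nonredundant column pairs of a
# small multichannel array, following the ij recipe from the docstring.
# The array shape and channel count are illustrative.
def _example_cohere_pairs():
    numSamples, numChannels = 4096, 4
    X = np.random.randn(numSamples, numChannels)
    ij = [(i, j) for i in range(numChannels)
          for j in range(i + 1, numChannels)]
    Cxy, Phase, freqs = cohere_pairs(X, ij, NFFT=256, Fs=2)
    return Cxy[(0, 1)], Phase[(0, 1)], freqs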
def entropy(y, bins):
r"""
Return the entropy of the data in *y* in units of nat.
.. math::
-\sum p_i \ln(p_i)
where :math:`p_i` is the probability of observing *y* in the
:math:`i^{th}` bin of *bins*. *bins* can be a number of bins or a
range of bins; see :func:`numpy.histogram`.
Compare *S* with analytic calculation for a Gaussian::
x = mu + sigma * randn(200000)
Sanalytic = 0.5 * ( 1.0 + log(2*pi*sigma**2.0) )
"""
n, bins = np.histogram(y, bins)
n = n.astype(np.float_)
n = np.take(n, np.nonzero(n)[0]) # get the positive
p = np.divide(n, len(y))
delta = bins[1] - bins[0]
S = -1.0 * np.sum(p * np.log(p)) + np.log(delta)
return S
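# A minimal numerical check (hypothetical helper, not part of the
# original module): for Gaussian samples the estimate should approach
# the analytic entropy 0.5*(1 + log(2*pi*sigma**2)) mentioned in the
# docstring. Sample size and bin count are illustrative.
def _example_entropy():
    sigma = 1.0
    y = sigma * np.random.randn(200000)
    S = entropy(y, bins=100)
    S_analytic = 0.5 * (1.0 + np.log(2 * np.pi * sigma ** 2))
    return S, S_analytic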
def normpdf(x, *args):
"Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*"
mu, sigma = args
return 1./(np.sqrt(2*np.pi)*sigma)*np.exp(-0.5 * (1./sigma*(x - mu))**2)
def find(condition):
"Return the indices where ravel(condition) is true"
res, = np.nonzero(np.ravel(condition))
return res
def longest_contiguous_ones(x):
"""
Return the indices of the longest stretch of contiguous ones in *x*,
assuming *x* is a vector of zeros and ones. If there are two
equally long stretches, pick the first.
"""
x = np.ravel(x)
if len(x) == 0:
return np.array([])
ind = (x == 0).nonzero()[0]
if len(ind) == 0:
return np.arange(len(x))
if len(ind) == len(x):
return np.array([])
y = np.zeros((len(x)+2,), x.dtype)
y[1:-1] = x
dif = np.diff(y)
up = (dif == 1).nonzero()[0]
dn = (dif == -1).nonzero()[0]
i = (dn-up == max(dn - up)).nonzero()[0][0]
ind = np.arange(up[i], dn[i])
return ind
def longest_ones(x):
'''alias for longest_contiguous_ones'''
return longest_contiguous_ones(x)
class PCA(object):
def __init__(self, a, standardize=True):
"""
compute the SVD of a and store data for PCA. Use project to
project the data onto a reduced set of dimensions
Inputs:
*a*: a numobservations x numdims array
*standardize*: True if input data are to be standardized. If False,
only centering will be carried out.
Attrs:
*a* a centered unit sigma version of input a
*numrows*, *numcols*: the dimensions of a
*mu*: a numdims array of means of a. This is the vector that points
to the origin of PCA space.
*sigma*: a numdims array of standard deviation of a
*fracs*: the proportion of variance of each of the principal
components
*s*: the actual eigenvalues of the decomposition
*Wt*: the weight vector for projecting a numdims point or array into
PCA space
*Y*: the input *a* projected into PCA space
The factor loadings are in the Wt factor, i.e., the factor
loadings for the 1st principal component are given by Wt[0].
This row is also the 1st eigenvector.
"""
n, m = a.shape
if n < m:
raise RuntimeError('we assume data in a is organized with '
'numrows>numcols')
self.numrows, self.numcols = n, m
self.mu = a.mean(axis=0)
self.sigma = a.std(axis=0)
self.standardize = standardize
a = self.center(a)
self.a = a
U, s, Vh = np.linalg.svd(a, full_matrices=False)
# Note: .H indicates the conjugate transposed / Hermitian.
# The SVD is commonly written as a = U s V.H.
# If U is a unitary matrix, it means that it satisfies U.H = inv(U).
# The rows of Vh are the eigenvectors of a.H a.
# The columns of U are the eigenvectors of a a.H.
# For row i in Vh and column i in U, the corresponding eigenvalue is
# s[i]**2.
self.Wt = Vh
# save the transposed coordinates
Y = np.dot(Vh, a.T).T
self.Y = Y
# save the eigenvalues
self.s = s**2
# and now the contribution of the individual components
vars = self.s/float(len(s))
self.fracs = vars/vars.sum()
def project(self, x, minfrac=0.):
'''
project x onto the principal axes, dropping any axes where fraction
of variance < minfrac
'''
x = np.asarray(x)
ndims = len(x.shape)
if (x.shape[-1] != self.numcols):
raise ValueError('Expected an array with dims[-1]==%d' %
self.numcols)
Y = np.dot(self.Wt, self.center(x).T).T
mask = self.fracs >= minfrac
if ndims == 2:
Yreduced = Y[:, mask]
else:
Yreduced = Y[mask]
return Yreduced
def center(self, x):
'''
center and optionally standardize the data using the mean and sigma
from training set a
'''
if self.standardize:
return (x - self.mu)/self.sigma
else:
return (x - self.mu)
@staticmethod
def _get_colinear():
c0 = np.array([
0.19294738, 0.6202667, 0.45962655, 0.07608613, 0.135818,
0.83580842, 0.07218851, 0.48318321, 0.84472463, 0.18348462,
0.81585306, 0.96923926, 0.12835919, 0.35075355, 0.15807861,
0.837437, 0.10824303, 0.1723387, 0.43926494, 0.83705486])
c1 = np.array([
-1.17705601, -0.513883, -0.26614584, 0.88067144, 1.00474954,
-1.1616545, 0.0266109, 0.38227157, 1.80489433, 0.21472396,
-1.41920399, -2.08158544, -0.10559009, 1.68999268, 0.34847107,
-0.4685737, 1.23980423, -0.14638744, -0.35907697, 0.22442616])
c2 = c0 + 2*c1
c3 = -3*c0 + 4*c1
a = np.array([c3, c0, c1, c2]).T
return a
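# A minimal usage sketch (hypothetical helper, not part of the original
# class): project strongly correlated 2-D data onto its principal axes;
# nearly all variance should land on the first component. The data
# generation below is illustrative.
def _example_pca():
    base = np.random.randn(500)
    a = np.column_stack([base, 2.0 * base + 0.1 * np.random.randn(500)])
    p = PCA(a)
    projected = p.project(a, minfrac=0.01)
    return p.fracs, projected.shape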
def prctile(x, p=(0.0, 25.0, 50.0, 75.0, 100.0)):
"""
Return the percentiles of *x*. *p* can either be a sequence of
percentile values or a scalar. If *p* is a sequence, the ith
element of the return sequence is the *p*(i)-th percentile of *x*.
If *p* is a scalar, the single *p*-th percentile of *x* is
returned as a scalar, interpolating between data points as needed.
"""
# This implementation derived from scipy.stats.scoreatpercentile
def _interpolate(a, b, fraction):
"""Returns the point at the given fraction between a and b, where
'fraction' must be between 0 and 1.
"""
return a + (b - a)*fraction
scalar = True
if cbook.iterable(p):
scalar = False
per = np.array(p)
values = np.array(x).ravel() # copy
values.sort()
idxs = per/100. * (values.shape[0] - 1)
ai = idxs.astype(np.int)
bi = ai + 1
frac = idxs % 1
# handle cases where attempting to interpolate past last index
cond = bi >= len(values)
if scalar:
if cond:
ai -= 1
bi -= 1
frac += 1
else:
ai[cond] -= 1
bi[cond] -= 1
frac[cond] += 1
return _interpolate(values[ai], values[bi], frac)
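# A minimal usage sketch (hypothetical helper, not part of the original
# module): quartiles of the integers 1..100, interpolated with the
# scipy-derived scheme above.
def _example_prctile():
    x = np.arange(1.0, 101.0)
    return prctile(x, p=(25.0, 50.0, 75.0))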
def prctile_rank(x, p):
"""
Return the rank for each element in *x*; ranks range from
0 to len(*p*). e.g., if *p* = (25, 50, 75), the return value will be a
len(*x*) array with values in [0,1,2,3] where 0 indicates the
value is less than the 25th percentile, 1 indicates the value is
>= the 25th and < 50th percentile, ... and 3 indicates the value
is above the 75th percentile cutoff.
*p* is either an array of percentiles in [0..100] or a scalar which
indicates how many quantiles of data you want ranked.
"""
if not cbook.iterable(p):
p = np.arange(100.0/p, 100.0, 100.0/p)
else:
p = np.asarray(p)
if p.max() <= 1 or p.min() < 0 or p.max() > 100:
raise ValueError('percentiles should be in range 0..100, not 0..1')
ptiles = prctile(x, p)
return np.searchsorted(ptiles, x)
def center_matrix(M, dim=0):
"""
Return the matrix *M* with each row having zero mean and unit std.
If *dim* = 1 operate on columns instead of rows. (*dim* is
opposite to the numpy axis kwarg.)
"""
M = np.asarray(M, np.float_)
if dim:
M = (M - M.mean(axis=0)) / M.std(axis=0)
else:
M = (M - M.mean(axis=1)[:, np.newaxis])
M = M / M.std(axis=1)[:, np.newaxis]
return M
def rk4(derivs, y0, t):
"""
Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.
This is a toy implementation which may be useful if you find
yourself stranded on a system w/o scipy. Otherwise use
:func:`scipy.integrate`.
*y0*
initial state vector
*t*
sample times
*derivs*
returns the derivative of the system and has the
signature ``dy = derivs(yi, ti)``
Example 1 ::
## 2D system
def derivs6(x,t):
d1 = x[0] + 2*x[1]
d2 = -3*x[0] + 4*x[1]
return (d1, d2)
dt = 0.0005
t = arange(0.0, 2.0, dt)
y0 = (1,2)
yout = rk4(derivs6, y0, t)
Example 2::
## 1D system
alpha = 2
def derivs(x,t):
return -alpha*x + exp(-t)
y0 = 1
yout = rk4(derivs, y0, t)
If you have access to scipy, you should probably be using the
scipy.integrate tools rather than this function.
"""
try:
Ny = len(y0)
except TypeError:
yout = np.zeros((len(t),), np.float_)
else:
yout = np.zeros((len(t), Ny), np.float_)
yout[0] = y0
i = 0
for i in np.arange(len(t)-1):
thist = t[i]
dt = t[i+1] - thist
dt2 = dt/2.0
y0 = yout[i]
k1 = np.asarray(derivs(y0, thist))
k2 = np.asarray(derivs(y0 + dt2*k1, thist+dt2))
k3 = np.asarray(derivs(y0 + dt2*k2, thist+dt2))
k4 = np.asarray(derivs(y0 + dt*k3, thist+dt))
yout[i+1] = y0 + dt/6.0*(k1 + 2*k2 + 2*k3 + k4)
return yout
def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0,
mux=0.0, muy=0.0, sigmaxy=0.0):
"""
Bivariate Gaussian distribution for equal shape *X*, *Y*.
See `bivariate normal
<http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
at mathworld.
"""
Xmu = X-mux
Ymu = Y-muy
rho = sigmaxy/(sigmax*sigmay)
z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay)
denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2)
return np.exp(-z/(2*(1-rho**2))) / denom
def get_xyz_where(Z, Cond):
"""
*Z* and *Cond* are *M* x *N* matrices. *Z* are data and *Cond* is
a boolean matrix where some condition is satisfied. Return value
is (*x*, *y*, *z*) where *x* and *y* are the indices into *Z* and
*z* are the values of *Z* at those indices. *x*, *y*, and *z* are
1D arrays.
"""
X, Y = np.indices(Z.shape)
return X[Cond], Y[Cond], Z[Cond]
def get_sparse_matrix(M, N, frac=0.1):
"""
Return a *M* x *N* sparse matrix with *frac* elements randomly
filled.
"""
data = np.zeros((M, N))
for i in range(int(M*N*frac)):
    # randint's upper bound is exclusive, so use M and N to allow
    # every row and column to be filled
    x = np.random.randint(0, M)
    y = np.random.randint(0, N)
    data[x, y] = np.random.rand()
return data
def dist(x, y):
"""
Return the distance between two points.
"""
d = x-y
return np.sqrt(np.dot(d, d))
def dist_point_to_segment(p, s0, s1):
"""
Get the distance of a point to a segment.
*p*, *s0*, *s1* are *xy* sequences
This algorithm from
http://softsurfer.com/Archive/algorithm_0102/algorithm_0102.htm#Distance%20to%20Ray%20or%20Segment
"""
p = np.asarray(p, np.float_)
s0 = np.asarray(s0, np.float_)
s1 = np.asarray(s1, np.float_)
v = s1 - s0
w = p - s0
c1 = np.dot(w, v)
if c1 <= 0:
return dist(p, s0)
c2 = np.dot(v, v)
if c2 <= c1:
return dist(p, s1)
b = c1 / c2
pb = s0 + b * v
return dist(p, pb)
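# A minimal usage sketch (hypothetical helper, not part of the original
# module): the distance is perpendicular when the projection falls
# inside the segment, else it is measured to the nearest endpoint.
def _example_dist_point_to_segment():
    s0, s1 = (0.0, 0.0), (10.0, 0.0)
    inside = dist_point_to_segment((5.0, 3.0), s0, s1)   # 3.0
    beyond = dist_point_to_segment((13.0, 4.0), s0, s1)  # 5.0, to s1
    return inside, beyond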
def segments_intersect(s1, s2):
"""
Return *True* if *s1* and *s2* intersect.
*s1* and *s2* are defined as::
s1: (x1, y1), (x2, y2)
s2: (x3, y3), (x4, y4)
"""
(x1, y1), (x2, y2) = s1
(x3, y3), (x4, y4) = s2
den = ((y4-y3) * (x2-x1)) - ((x4-x3)*(y2-y1))
n1 = ((x4-x3) * (y1-y3)) - ((y4-y3)*(x1-x3))
n2 = ((x2-x1) * (y1-y3)) - ((y2-y1)*(x1-x3))
if den == 0:
# lines parallel
return False
u1 = n1/den
u2 = n2/den
return 0.0 <= u1 <= 1.0 and 0.0 <= u2 <= 1.0
def fftsurr(x, detrend=detrend_none, window=window_none):
"""
Compute an FFT phase randomized surrogate of *x*.
"""
if cbook.iterable(window):
x = window*detrend(x)
else:
x = window(detrend(x))
z = np.fft.fft(x)
a = 2.*np.pi*1j
phase = a * np.random.rand(len(x))
z = z*np.exp(phase)
return np.fft.ifft(z).real
def movavg(x, n):
"""
Compute the len(*n*) moving average of *x*.
"""
w = np.empty((n,), dtype=np.float_)
w[:] = 1.0/n
return np.convolve(x, w, mode='valid')
# the following code was written and submitted by Fernando Perez
# from the ipython numutils package under a BSD license
# begin fperez functions
"""
A set of convenient utilities for numerical work.
Most of this module requires numpy or is meant to be used with it.
Copyright (c) 2001-2004, Fernando Perez. <Fernando.Perez@colorado.edu>
All rights reserved.
This license was generated from the BSD license template as found in:
http://www.opensource.org/licenses/bsd-license.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the IPython project nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# *****************************************************************************
# Globals
# ****************************************************************************
# function definitions
exp_safe_MIN = math.log(2.2250738585072014e-308)
exp_safe_MAX = 1.7976931348623157e+308
def exp_safe(x):
"""
Compute exponentials which safely underflow to zero.
Slow, but convenient to use. Note that numpy provides proper
floating point exception handling with access to the underlying
hardware.
"""
if type(x) is np.ndarray:
return np.exp(np.clip(x, exp_safe_MIN, exp_safe_MAX))
else:
return math.exp(x)
def amap(fn, *args):
"""
amap(function, sequence[, sequence, ...]) -> array.
Works like :func:`map`, but it returns an array. This is just a
convenient shorthand for ``numpy.array(map(...))``.
"""
return np.array(list(map(fn, *args)))
def rms_flat(a):
"""
Return the root mean square of all the elements of *a*, flattened out.
"""
return np.sqrt(np.mean(np.absolute(a)**2))
def l1norm(a):
"""
Return the *l1* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sum(np.absolute(a))
def l2norm(a):
"""
Return the *l2* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sqrt(np.sum(np.absolute(a)**2))
def norm_flat(a, p=2):
"""
norm(a,p=2) -> l-p norm of a.flat
Return the l-p norm of *a*, considered as a flat array. This is NOT a true
matrix norm, since arrays of arbitrary rank are always flattened.
*p* can be a number or the string 'Infinity' to get the L-infinity norm.
"""
# This function was being masked by a more general norm later in
# the file. We may want to simply delete it.
if p == 'Infinity':
return np.amax(np.absolute(a))
else:
return (np.sum(np.absolute(a)**p))**(1.0/p)
def frange(xini, xfin=None, delta=None, **kw):
"""
frange([start,] stop[, step, keywords]) -> array of floats
Return a numpy ndarray containing a progression of floats. Similar to
:func:`numpy.arange`, but defaults to a closed interval.
``frange(x0, x1)`` returns ``[x0, x0+1, x0+2, ..., x1]``; *start*
defaults to 0, and the endpoint *is included*. This behavior is
different from that of :func:`range` and
:func:`numpy.arange`. This is deliberate, since :func:`frange`
will probably be more useful for generating lists of points for
function evaluation, and endpoints are often desired in this
use. The usual behavior of :func:`range` can be obtained by
setting the keyword *closed* = 0; in this case, :func:`frange`
basically becomes :func:`numpy.arange`.
When *step* is given, it specifies the increment (or
decrement). All arguments can be floating point numbers.
``frange(x0,x1,d)`` returns ``[x0,x0+d,x0+2d,...,xfin]`` where
*xfin* <= *x1*.
:func:`frange` can also be called with the keyword *npts*. This
sets the number of points the list should contain (and overrides
the value *step* might have been given). :func:`numpy.arange`
doesn't offer this option.
Examples::
>>> frange(3)
array([ 0., 1., 2., 3.])
>>> frange(3,closed=0)
array([ 0., 1., 2.])
>>> frange(1,6,2)
array([1, 3, 5]) or 1,3,5,7, depending on floating point vagaries
>>> frange(1,6.5,npts=5)
array([ 1. , 2.375, 3.75 , 5.125, 6.5 ])
"""
# defaults
kw.setdefault('closed', 1)
endpoint = kw['closed'] != 0
# funny logic to allow the *first* argument to be optional (like range())
# This was modified with a simpler version from a similar frange() found
# at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66472
if xfin is None:
xfin = xini + 0.0
xini = 0.0
if delta is None:
delta = 1.0
# compute # of points, spacing and return final list
try:
npts = kw['npts']
delta = (xfin-xini)/float(npts-endpoint)
except KeyError:
npts = int(round((xfin-xini)/delta)) + endpoint
# round finds the nearest, so the endpoint can be up to
# delta/2 larger than xfin.
return np.arange(npts)*delta+xini
# end frange()
def identity(n, rank=2, dtype='l', typecode=None):
"""
Returns the identity matrix of shape (*n*, *n*, ..., *n*) (rank *r*).
For ranks higher than 2, this object is simply a multi-index Kronecker
delta::
/ 1 if i0=i1=...=iR,
id[i0,i1,...,iR] = -|
\ 0 otherwise.
Optionally a *dtype* (or typecode) may be given (it defaults to 'l').
Since rank defaults to 2, this function behaves in the default case (when
only *n* is given) like ``numpy.identity(n)`` -- but surprisingly, it is
much faster.
"""
if typecode is not None:
dtype = typecode
iden = np.zeros((n,)*rank, dtype)
for i in range(n):
idx = (i,)*rank
iden[idx] = 1
return iden
def base_repr(number, base=2, padding=0):
"""
Return the representation of a *number* in any given *base*.
"""
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if number < base:
return (padding - 1) * chars[0] + chars[int(number)]
max_exponent = int(math.log(number)/math.log(base))
max_power = int(base) ** max_exponent  # Python ints auto-promote, so long() is unnecessary
lead_digit = int(number/max_power)
return (chars[lead_digit] +
base_repr(number - max_power * lead_digit, base,
max(padding - 1, max_exponent)))
def binary_repr(number, max_length=1025):
"""
Return the binary representation of the input *number* as a
string.
This is more efficient than using :func:`base_repr` with base 2.
Increase the value of max_length for very large numbers. Note that
on 32-bit machines, 2**1023 is the largest integer power of 2
which can be converted to a Python float.
"""
# assert number < 2L << max_length
shifts = list(map(operator.rshift, max_length * [number],
range(max_length - 1, -1, -1)))
digits = list(map(operator.mod, shifts, max_length * [2]))
if not digits.count(1):
    return '0'
digits = digits[digits.index(1):]
return ''.join(map(repr, digits)).replace('L', '')
def log2(x, ln2=math.log(2.0)):
"""
Return the log(*x*) in base 2.
This is a _slow_ function but which is guaranteed to return the correct
integer value if the input is an integer exact power of 2.
"""
try:
bin_n = binary_repr(x)[1:]
except (AssertionError, TypeError):
return math.log(x)/ln2
else:
if '1' in bin_n:
return math.log(x)/ln2
else:
return len(bin_n)
def ispower2(n):
"""
Returns the log base 2 of *n* if *n* is a power of 2, zero otherwise.
Note the potential ambiguity if *n* == 1: 2**0 == 1, interpret accordingly.
"""
bin_n = binary_repr(n)[1:]
if '1' in bin_n:
return 0
else:
return len(bin_n)
def isvector(X):
"""
Like the MATLAB function with the same name, returns *True*
if the supplied numpy array or matrix *X* looks like a vector,
meaning it has a one non-singleton axis (i.e., it can have
multiple axes, but all must have length 1, except for one of
them).
If you just want to see if the array has 1 axis, use X.ndim == 1.
"""
return np.prod(X.shape) == np.max(X.shape)
# end fperez numutils code
# helpers for loading, saving, manipulating and viewing numpy record arrays
def safe_isnan(x):
':func:`numpy.isnan` for arbitrary types'
if cbook.is_string_like(x):
return False
try:
b = np.isnan(x)
except NotImplementedError:
return False
except TypeError:
return False
else:
return b
def safe_isinf(x):
':func:`numpy.isinf` for arbitrary types'
if cbook.is_string_like(x):
return False
try:
b = np.isinf(x)
except NotImplementedError:
return False
except TypeError:
return False
else:
return b
def rec_append_fields(rec, names, arrs, dtypes=None):
"""
Return a new record array with field names populated with data
from arrays in *arrs*. If appending a single field, then *names*,
*arrs* and *dtypes* do not have to be lists. They can just be the
values themselves.
"""
if (not cbook.is_string_like(names) and cbook.iterable(names)
and len(names) and cbook.is_string_like(names[0])):
if len(names) != len(arrs):
raise ValueError("number of arrays do not match number of names")
else: # we have only 1 name and 1 array
names = [names]
arrs = [arrs]
arrs = list(map(np.asarray, arrs))
if dtypes is None:
dtypes = [a.dtype for a in arrs]
elif not cbook.iterable(dtypes):
dtypes = [dtypes]
if len(arrs) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(arrs)
else:
raise ValueError("dtypes must be None, a single dtype or a list")
newdtype = np.dtype(rec.dtype.descr + list(zip(names, dtypes)))
newrec = np.recarray(rec.shape, dtype=newdtype)
for field in rec.dtype.fields:
newrec[field] = rec[field]
for name, arr in zip(names, arrs):
newrec[name] = arr
return newrec
def rec_drop_fields(rec, names):
"""
Return a new numpy record array with fields in *names* dropped.
"""
names = set(names)
newdtype = np.dtype([(name, rec.dtype[name]) for name in rec.dtype.names
if name not in names])
newrec = np.recarray(rec.shape, dtype=newdtype)
for field in newdtype.names:
newrec[field] = rec[field]
return newrec
def rec_keep_fields(rec, names):
"""
Return a new numpy record array with only fields listed in names
"""
if cbook.is_string_like(names):
names = names.split(',')
arrays = []
for name in names:
arrays.append(rec[name])
return np.rec.fromarrays(arrays, names=names)
def rec_groupby(r, groupby, stats):
"""
*r* is a numpy record array
*groupby* is a sequence of record array attribute names that
together form the grouping key. e.g., ('date', 'productcode')
*stats* is a sequence of (*attr*, *func*, *outname*) tuples which
will call ``x = func(attr)`` and assign *x* to the record array
output with attribute *outname*. For example::
stats = ( ('sales', len, 'numsales'), ('sales', np.mean, 'avgsale') )
Return record array has *dtype* names for each attribute name in
the *groupby* argument, with the associated group values, and
for each outname name in the *stats* argument, with the associated
stat summary output.
"""
# build a dictionary from groupby keys-> list of indices into r with
# those keys
rowd = dict()
for i, row in enumerate(r):
key = tuple([row[attr] for attr in groupby])
rowd.setdefault(key, []).append(i)
# sort the output by groupby keys
keys = list(six.iterkeys(rowd))
keys.sort()
rows = []
for key in keys:
row = list(key)
# get the indices for this groupby key
ind = rowd[key]
thisr = r[ind]
# call each stat function for this groupby slice
row.extend([func(thisr[attr]) for attr, func, outname in stats])
rows.append(row)
# build the output record array with groupby and outname attributes
attrs, funcs, outnames = list(zip(*stats))
names = list(groupby)
names.extend(outnames)
return np.rec.fromrecords(rows, names=names)
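# A minimal usage sketch (hypothetical helper, not part of the original
# module): group a small record array by a key column and summarize a
# value column. Field names are illustrative.
def _example_rec_groupby():
    r = np.rec.fromrecords(
        [('a', 1.0), ('a', 3.0), ('b', 5.0)],
        names='productcode,sales')
    stats = (('sales', len, 'numsales'), ('sales', np.mean, 'avgsale'))
    return rec_groupby(r, ('productcode',), stats)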
def rec_summarize(r, summaryfuncs):
"""
*r* is a numpy record array
*summaryfuncs* is a list of (*attr*, *func*, *outname*) tuples
which will apply *func* to the array *r*[attr] and assign the
output to a new attribute name *outname*. The returned record
array is identical to *r*, with extra arrays for each element in
*summaryfuncs*.
"""
names = list(r.dtype.names)
arrays = [r[name] for name in names]
for attr, func, outname in summaryfuncs:
names.append(outname)
arrays.append(np.asarray(func(r[attr])))
return np.rec.fromarrays(arrays, names=names)
def rec_join(key, r1, r2, jointype='inner', defaults=None, r1postfix='1',
r2postfix='2'):
"""
Join record arrays *r1* and *r2* on *key*; *key* is a tuple of
field names -- if *key* is a string it is assumed to be a single
attribute name. If *r1* and *r2* have equal values on all the keys
in the *key* tuple, then their fields will be merged into a new
record array containing the intersection of the fields of *r1* and
*r2*.
*r1* (also *r2*) must not have any duplicate keys.
The *jointype* keyword can be 'inner', 'outer', 'leftouter'. To
do a rightouter join just reverse *r1* and *r2*.
The *defaults* keyword is a dictionary filled with
``{column_name:default_value}`` pairs.
The keywords *r1postfix* and *r2postfix* are postfixed to column names
(other than keys) that are both in *r1* and *r2*.
"""
if cbook.is_string_like(key):
key = (key, )
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s' % name)
def makekey(row):
return tuple([row[name] for name in key])
r1d = dict([(makekey(row), i) for i, row in enumerate(r1)])
r2d = dict([(makekey(row), i) for i, row in enumerate(r2)])
r1keys = set(r1d.keys())
r2keys = set(r2d.keys())
common_keys = r1keys & r2keys
r1ind = np.array([r1d[k] for k in common_keys])
r2ind = np.array([r2d[k] for k in common_keys])
common_len = len(common_keys)
left_len = right_len = 0
if jointype == "outer" or jointype == "leftouter":
left_keys = r1keys.difference(r2keys)
left_ind = np.array([r1d[k] for k in left_keys])
left_len = len(left_ind)
if jointype == "outer":
right_keys = r2keys.difference(r1keys)
right_ind = np.array([r2d[k] for k in right_keys])
right_len = len(right_ind)
def key_desc(name):
'''
if name is a string key, use the larger size of r1 or r2 before
merging
'''
dt1 = r1.dtype[name]
if dt1.type != np.string_:
return (name, dt1.descr[0][1])
dt2 = r2.dtype[name]
if dt1 != dt2:
msg = "The '{0}' fields in arrays 'r1' and 'r2' must have the same"
msg += " dtype."
raise ValueError(msg.format(name))
if dt1.num > dt2.num:
return (name, dt1.descr[0][1])
else:
return (name, dt2.descr[0][1])
keydesc = [key_desc(name) for name in key]
def mapped_r1field(name):
"""
The column name in *newrec* that corresponds to the column in *r1*.
"""
if name in key or name not in r2.dtype.names:
return name
else:
return name + r1postfix
def mapped_r2field(name):
"""
The column name in *newrec* that corresponds to the column in *r2*.
"""
if name in key or name not in r1.dtype.names:
return name
else:
return name + r2postfix
r1desc = [(mapped_r1field(desc[0]), desc[1]) for desc in r1.dtype.descr
if desc[0] not in key]
r2desc = [(mapped_r2field(desc[0]), desc[1]) for desc in r2.dtype.descr
if desc[0] not in key]
newdtype = np.dtype(keydesc + r1desc + r2desc)
newrec = np.recarray((common_len + left_len + right_len,), dtype=newdtype)
if defaults is not None:
for thiskey in defaults:
if thiskey not in newdtype.names:
warnings.warn('rec_join defaults key="%s" not in new dtype '
'names "%s"' % (thiskey, newdtype.names))
for name in newdtype.names:
dt = newdtype[name]
if dt.kind in ('f', 'i'):
newrec[name] = 0
if jointype != 'inner' and defaults is not None:
# fill in the defaults en masse
newrec_fields = list(newrec.dtype.fields.keys())
for k, v in six.iteritems(defaults):
if k in newrec_fields:
newrec[k] = v
for field in r1.dtype.names:
newfield = mapped_r1field(field)
if common_len:
newrec[newfield][:common_len] = r1[field][r1ind]
if (jointype == "outer" or jointype == "leftouter") and left_len:
newrec[newfield][common_len:(common_len+left_len)] = (
r1[field][left_ind]
)
for field in r2.dtype.names:
newfield = mapped_r2field(field)
if field not in key and common_len:
newrec[newfield][:common_len] = r2[field][r2ind]
if jointype == "outer" and right_len:
newrec[newfield][-right_len:] = r2[field][right_ind]
newrec.sort(order=key)
return newrec
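# A minimal usage sketch (hypothetical helper, not part of the original
# module): an inner join of two record arrays on a shared key column;
# only id == 2 appears in both inputs, so the result has one row. Field
# names are illustrative.
def _example_rec_join():
    r1 = np.rec.fromrecords([(1, 10.0), (2, 20.0)], names='id,price')
    r2 = np.rec.fromrecords([(2, 0.5), (3, 0.7)], names='id,ret')
    return rec_join('id', r1, r2, jointype='inner')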
def recs_join(key, name, recs, jointype='outer', missing=0., postfixes=None):
"""
Join a sequence of record arrays on single column key.
This function only joins a single column of the multiple record arrays
*key*
is the column name that acts as a key
*name*
is the name of the column that we want to join
*recs*
is a list of record arrays to join
*jointype*
is a string 'inner' or 'outer'
*missing*
is what any missing field is replaced by
*postfixes*
if not None, a len recs sequence of postfixes
returns a record array with columns [rowkey, name0, name1, ... namen-1].
or if postfixes [PF0, PF1, ..., PFN-1] are supplied,
[rowkey, namePF0, namePF1, ... namePFN-1].
Example::
r = recs_join("date", "close", recs=[r0, r1], missing=0.)
"""
results = []
aligned_iters = cbook.align_iterators(operator.attrgetter(key),
*[iter(r) for r in recs])
def extract(r):
if r is None:
return missing
else:
return r[name]
if jointype == "outer":
for rowkey, row in aligned_iters:
results.append([rowkey] + list(map(extract, row)))
elif jointype == "inner":
for rowkey, row in aligned_iters:
if None not in row: # throw out any Nones
results.append([rowkey] + list(map(extract, row)))
if postfixes is None:
postfixes = ['%d' % i for i in range(len(recs))]
names = ",".join([key] + ["%s%s" % (name, postfix)
for postfix in postfixes])
return np.rec.fromrecords(results, names=names)
def csv2rec(fname, comments='#', skiprows=0, checkrows=0, delimiter=',',
converterd=None, names=None, missing='', missingd=None,
use_mrecords=False, dayfirst=False, yearfirst=False):
"""
Load data from comma/space/tab delimited file in *fname* into a
numpy record array and return the record array.
If *names* is *None*, a header row is required to automatically
assign the recarray names. The headers will be lower cased,
spaces will be converted to underscores, and illegal attribute
name characters removed. If *names* is not *None*, it is a
sequence of names to use for the column names. In this case, it
is assumed there is no header row.
- *fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
- *comments*: the character used to indicate the start of a comment
in the file, or *None* to switch off the removal of comments
- *skiprows*: is the number of rows from the top to skip
- *checkrows*: is the number of rows to check to validate the column
data type. When set to zero all rows are validated.
- *converterd*: if not *None*, is a dictionary mapping column number or
munged column name to a converter function.
- *names*: if not None, is a list of header names. In this case, no
header will be read from the file
- *missingd* is a dictionary mapping munged column names to field values
which signify that the field does not contain actual data and should
be masked, e.g., '0000-00-00' or 'unused'
- *missing*: a string whose value signals a missing field regardless of
the column it appears in
- *use_mrecords*: if True, return an mrecords.fromrecords record array if
any of the data are missing
- *dayfirst*: default is False so that MM-DD-YY has precedence over
DD-MM-YY. See
http://labix.org/python-dateutil#head-b95ce2094d189a89f80f5ae52a05b4ab7b41af47
for further information.
- *yearfirst*: default is False so that MM-DD-YY has precedence over
YY-MM-DD. See
http://labix.org/python-dateutil#head-b95ce2094d189a89f80f5ae52a05b4ab7b41af47
for further information.
If no rows are found, *None* is returned -- see
:file:`examples/loadrec.py`
"""
if converterd is None:
converterd = dict()
if missingd is None:
missingd = {}
import dateutil.parser
import datetime
fh = cbook.to_filehandle(fname)
delimiter = str(delimiter)
class FH:
"""
For space-delimited files, we want different behavior than
comma or tab. Generally, we want multiple spaces to be
treated as a single separator, whereas with comma and tab we
want multiple commas to return multiple (empty) fields. The
join/strip trick below effects this.
"""
def __init__(self, fh):
self.fh = fh
def close(self):
self.fh.close()
def seek(self, arg):
self.fh.seek(arg)
def fix(self, s):
return ' '.join(s.split())
def __next__(self):
return self.fix(next(self.fh))
def __iter__(self):
for line in self.fh:
yield self.fix(line)
if delimiter == ' ':
fh = FH(fh)
reader = csv.reader(fh, delimiter=delimiter)
def process_skiprows(reader):
if skiprows:
for i, row in enumerate(reader):
if i >= (skiprows-1):
break
return fh, reader
process_skiprows(reader)
def ismissing(name, val):
"Should the value val in column name be masked?"
if val == missing or val == missingd.get(name) or val == '':
return True
else:
return False
def with_default_value(func, default):
def newfunc(name, val):
if ismissing(name, val):
return default
else:
return func(val)
return newfunc
def mybool(x):
if x == 'True':
return True
elif x == 'False':
return False
else:
raise ValueError('invalid bool')
dateparser = dateutil.parser.parse
mydateparser = with_default_value(dateparser, datetime.date(1, 1, 1))
myfloat = with_default_value(float, np.nan)
myint = with_default_value(int, -1)
mystr = with_default_value(str, '')
mybool = with_default_value(mybool, None)
def mydate(x):
# try and return a date object
d = dateparser(x, dayfirst=dayfirst, yearfirst=yearfirst)
if d.hour > 0 or d.minute > 0 or d.second > 0:
raise ValueError('not a date')
return d.date()
mydate = with_default_value(mydate, datetime.date(1, 1, 1))
def get_func(name, item, func):
# promote functions in this order
funcmap = {mybool: myint, myint: myfloat, myfloat: mydate,
mydate: mydateparser, mydateparser: mystr}
try:
func(name, item)
except:
if func == mystr:
raise ValueError('Could not find a working conversion '
'function')
else:
return get_func(name, item, funcmap[func]) # recurse
else:
return func
# map column names that clash with builtins -- TODO - extend this list
itemd = {
'return': 'return_',
'file': 'file_',
'print': 'print_',
}
def get_converters(reader, comments):
converters = None
i = 0
for row in reader:
if (len(row) and comments is not None and
row[0].startswith(comments)):
continue
if i == 0:
converters = [mybool]*len(row)
if checkrows and i > checkrows:
break
i += 1
for j, (name, item) in enumerate(zip(names, row)):
func = converterd.get(j)
if func is None:
func = converterd.get(name)
if func is None:
func = converters[j]
if len(item.strip()):
func = get_func(name, item, func)
else:
# how should we handle custom converters and defaults?
func = with_default_value(func, None)
converters[j] = func
return converters
# Get header and remove invalid characters
needheader = names is None
if needheader:
for row in reader:
if (len(row) and comments is not None and
row[0].startswith(comments)):
continue
headers = row
break
# remove these chars
delete = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
delete.add('"')
names = []
seen = dict()
for i, item in enumerate(headers):
item = item.strip().lower().replace(' ', '_')
item = ''.join([c for c in item if c not in delete])
if not len(item):
item = 'column%d' % i
item = itemd.get(item, item)
cnt = seen.get(item, 0)
if cnt > 0:
names.append(item + '_%d' % cnt)
else:
names.append(item)
seen[item] = cnt+1
else:
if cbook.is_string_like(names):
names = [n.strip() for n in names.split(',')]
# get the converter functions by inspecting checkrows
converters = get_converters(reader, comments)
if converters is None:
raise ValueError('Could not find any valid data in CSV file')
# reset the reader and start over
fh.seek(0)
reader = csv.reader(fh, delimiter=delimiter)
process_skiprows(reader)
if needheader:
while 1:
# skip past any comments and consume one line of column header
row = next(reader)
if (len(row) and comments is not None and
row[0].startswith(comments)):
continue
break
# iterate over the remaining rows and convert the data to date
# objects, ints, or floats as appropriate
rows = []
rowmasks = []
for i, row in enumerate(reader):
if not len(row):
continue
if comments is not None and row[0].startswith(comments):
continue
# Ensure that the row returned always has the same nr of elements
row.extend([''] * (len(converters) - len(row)))
rows.append([func(name, val)
for func, name, val in zip(converters, names, row)])
rowmasks.append([ismissing(name, val)
for name, val in zip(names, row)])
fh.close()
if not len(rows):
return None
if use_mrecords and np.any(rowmasks):
try:
from numpy.ma import mrecords
except ImportError:
raise RuntimeError('numpy 1.05 or later is required for masked '
'array support')
else:
r = mrecords.fromrecords(rows, names=names, mask=rowmasks)
else:
r = np.rec.fromrecords(rows, names=names)
return r
# a series of classes for describing the format intentions of various rec views
class FormatObj(object):
def tostr(self, x):
return self.toval(x)
def toval(self, x):
return str(x)
def fromstr(self, s):
return s
def __hash__(self):
"""
override the hash function of any of the formatters, so that we don't
create duplicate excel format styles
"""
return hash(self.__class__)
class FormatString(FormatObj):
def tostr(self, x):
val = repr(x)
return val[1:-1]
class FormatFormatStr(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def tostr(self, x):
if x is None:
return 'None'
return self.fmt % self.toval(x)
class FormatFloat(FormatFormatStr):
def __init__(self, precision=4, scale=1.):
FormatFormatStr.__init__(self, '%%1.%df' % precision)
self.precision = precision
self.scale = scale
def __hash__(self):
return hash((self.__class__, self.precision, self.scale))
def toval(self, x):
if x is not None:
x = x * self.scale
return x
def fromstr(self, s):
return float(s)/self.scale
class FormatInt(FormatObj):
def tostr(self, x):
return '%d' % int(x)
def toval(self, x):
return int(x)
def fromstr(self, s):
return int(s)
class FormatBool(FormatObj):
def toval(self, x):
return str(x)
def fromstr(self, s):
return bool(s)
class FormatPercent(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=100.)
class FormatThousands(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-3)
class FormatMillions(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-6)
class FormatDate(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def __hash__(self):
return hash((self.__class__, self.fmt))
def toval(self, x):
if x is None:
return 'None'
return x.strftime(self.fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x).date()
class FormatDatetime(FormatDate):
def __init__(self, fmt='%Y-%m-%d %H:%M:%S'):
FormatDate.__init__(self, fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x)
defaultformatd = {
np.bool_: FormatBool(),
np.int16: FormatInt(),
np.int32: FormatInt(),
np.int64: FormatInt(),
np.float32: FormatFloat(),
np.float64: FormatFloat(),
np.object_: FormatObj(),
np.string_: FormatString(),
}
def get_formatd(r, formatd=None):
'build a formatd guaranteed to have a key for every dtype name'
if formatd is None:
formatd = dict()
for i, name in enumerate(r.dtype.names):
dt = r.dtype[name]
format = formatd.get(name)
if format is None:
format = defaultformatd.get(dt.type, FormatObj())
formatd[name] = format
return formatd
def csvformat_factory(format):
format = copy.deepcopy(format)
if isinstance(format, FormatFloat):
format.scale = 1. # override scaling for storage
format.fmt = '%r'
return format
def rec2txt(r, header=None, padding=3, precision=3, fields=None):
"""
Returns a textual representation of a record array.
*r*: numpy recarray
*header*: list of column headers
*padding*: space between each column
*precision*: number of decimal places to use for floats.
Set to an integer to apply to all floats. Set to a
list of integers to apply precision individually.
Precision for non-floats is simply ignored.
*fields* : if not None, a list of field names to print. fields
can be a list of strings like ['field1', 'field2'] or a single
comma separated string like 'field1,field2'
Example::
precision=[0,2,3]
Output::
ID Price Return
ABC 12.54 0.234
XYZ 6.32 -0.076
"""
if fields is not None:
r = rec_keep_fields(r, fields)
if cbook.is_numlike(precision):
precision = [precision]*len(r.dtype)
def get_type(item, atype=int):
tdict = {None: int, int: float, float: str}
try:
atype(str(item))
except:
return get_type(item, tdict[atype])
return atype
def get_justify(colname, column, precision):
ntype = type(column[0])
if (ntype == np.str or ntype == np.str_ or ntype == np.string0 or
ntype == np.string_):
length = max(len(colname), column.itemsize)
return 0, length+padding, "%s" # left justify
if (ntype == np.int or ntype == np.int16 or ntype == np.int32 or
ntype == np.int64 or ntype == np.int8 or ntype == np.int_):
length = max(len(colname),
np.max(list(map(len, list(map(str, column))))))
return 1, length+padding, "%d" # right justify
# JDH: my powerbook does not have np.float96 using np 1.3.0
"""
In [2]: np.__version__
Out[2]: '1.3.0.dev5948'
In [3]: !uname -a
Darwin Macintosh-5.local 9.4.0 Darwin Kernel Version 9.4.0: Mon Jun
9 19:30:53 PDT 2008; root:xnu-1228.5.20~1/RELEASE_I386 i386 i386
In [4]: np.float96
---------------------------------------------------------------------------
AttributeError Traceback (most recent call la
"""
if (ntype == np.float or ntype == np.float32 or ntype == np.float64 or
(hasattr(np, 'float96') and (ntype == np.float96)) or
ntype == np.float_):
fmt = "%." + str(precision) + "f"
length = max(
len(colname),
np.max(list(map(len, list(map(lambda x: fmt % x, column)))))
)
return 1, length+padding, fmt # right justify
return (0,
max(len(colname),
np.max(list(map(len, list(map(str, column))))))+padding,
"%s")
if header is None:
header = r.dtype.names
justify_pad_prec = [get_justify(header[i], r.__getitem__(colname),
precision[i])
for i, colname in enumerate(r.dtype.names)]
justify_pad_prec_spacer = []
for i in range(len(justify_pad_prec)):
just, pad, prec = justify_pad_prec[i]
if i == 0:
justify_pad_prec_spacer.append((just, pad, prec, 0))
else:
pjust, ppad, pprec = justify_pad_prec[i-1]
if pjust == 0 and just == 1:
justify_pad_prec_spacer.append((just, pad-padding, prec, 0))
elif pjust == 1 and just == 0:
justify_pad_prec_spacer.append((just, pad, prec, padding))
else:
justify_pad_prec_spacer.append((just, pad, prec, 0))
def format(item, just_pad_prec_spacer):
just, pad, prec, spacer = just_pad_prec_spacer
if just == 0:
return spacer*' ' + str(item).ljust(pad)
else:
if get_type(item) == float:
item = (prec % float(item))
elif get_type(item) == int:
item = (prec % int(item))
return item.rjust(pad)
textl = []
textl.append(''.join([format(colitem, justify_pad_prec_spacer[j])
for j, colitem in enumerate(header)]))
for i, row in enumerate(r):
textl.append(''.join([format(colitem, justify_pad_prec_spacer[j])
for j, colitem in enumerate(row)]))
if i == 0:
textl[0] = textl[0].rstrip()
text = os.linesep.join(textl)
return text
def rec2csv(r, fname, delimiter=',', formatd=None, missing='',
missingd=None, withheader=True):
"""
Save the data from numpy recarray *r* into a
comma-/space-/tab-delimited file. The record array dtype names
will be used for column headers.
*fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
*withheader*: if withheader is False, do not write the attribute
names in the first row
for formatd type FormatFloat, we override the precision to store
full precision floats in the CSV file
.. seealso::
:func:`csv2rec`
For information about *missing* and *missingd*, which can
be used to fill in masked values into your CSV file.
"""
delimiter = str(delimiter)
if missingd is None:
missingd = dict()
def with_mask(func):
def newfunc(val, mask, mval):
if mask:
return mval
else:
return func(val)
return newfunc
if r.ndim != 1:
raise ValueError('rec2csv only operates on 1 dimensional recarrays')
formatd = get_formatd(r, formatd)
funcs = []
for i, name in enumerate(r.dtype.names):
funcs.append(with_mask(csvformat_factory(formatd[name]).tostr))
fh, opened = cbook.to_filehandle(fname, 'wb', return_opened=True)
writer = csv.writer(fh, delimiter=delimiter)
header = r.dtype.names
if withheader:
writer.writerow(header)
# Our list of specials for missing values
mvals = []
for name in header:
mvals.append(missingd.get(name, missing))
ismasked = False
if len(r):
row = r[0]
ismasked = hasattr(row, '_fieldmask')
for row in r:
if ismasked:
row, rowmask = row.item(), row._fieldmask.item()
else:
rowmask = [False] * len(row)
writer.writerow([func(val, mask, mval) for func, val, mask, mval
in zip(funcs, row, rowmask, mvals)])
if opened:
fh.close()
def griddata(x, y, z, xi, yi, interp='nn'):
"""Interpolates from a nonuniformly spaced grid to some other
grid.
Fits a surface of the form z = f(`x`, `y`) to the data in the
(usually) nonuniformly spaced vectors (`x`, `y`, `z`), then
interpolates this surface at the points specified by
(`xi`, `yi`) to produce `zi`.
Parameters
----------
x, y, z : 1d array_like
Coordinates of grid points to interpolate from.
xi, yi : 1d or 2d array_like
Coordinates of grid points to interpolate to.
interp : string key from {'nn', 'linear'}
Interpolation algorithm, either 'nn' for natural neighbor, or
'linear' for linear interpolation.
Returns
-------
2d float array
Array of values interpolated at (`xi`, `yi`) points. Array
will be masked if any of (`xi`, `yi`) are outside the convex
hull of (`x`, `y`).
Notes
-----
If `interp` is 'nn' (the default), uses natural neighbor
interpolation based on Delaunay triangulation. This option is
only available if the mpl_toolkits.natgrid module is installed.
This can be downloaded from https://github.com/matplotlib/natgrid.
The (`xi`, `yi`) grid must be regular and monotonically increasing
in this case.
If `interp` is 'linear', linear interpolation is used via
matplotlib.tri.LinearTriInterpolator.
Instead of using `griddata`, more flexible functionality and other
interpolation options are available using a
matplotlib.tri.Triangulation and a matplotlib.tri.TriInterpolator.
"""
# Check input arguments.
x = np.asanyarray(x, dtype=np.float64)
y = np.asanyarray(y, dtype=np.float64)
z = np.asanyarray(z, dtype=np.float64)
if x.shape != y.shape or x.shape != z.shape or x.ndim != 1:
raise ValueError("x, y and z must be equal-length 1-D arrays")
xi = np.asanyarray(xi, dtype=np.float64)
yi = np.asanyarray(yi, dtype=np.float64)
if xi.ndim != yi.ndim:
raise ValueError("xi and yi must be arrays with the same number of "
"dimensions (1 or 2)")
if xi.ndim == 2 and xi.shape != yi.shape:
raise ValueError("if xi and yi are 2D arrays, they must have the same "
"shape")
if xi.ndim == 1:
xi, yi = np.meshgrid(xi, yi)
if interp == 'nn':
use_nn_interpolation = True
elif interp == 'linear':
use_nn_interpolation = False
else:
raise ValueError("interp keyword must be one of 'linear' (for linear "
"interpolation) or 'nn' (for natural neighbor "
"interpolation). Default is 'nn'.")
# Remove masked points.
mask = np.ma.getmask(z)
if not (mask is np.ma.nomask):
x = x.compress(~mask)
y = y.compress(~mask)
z = z.compressed()
if use_nn_interpolation:
try:
from mpl_toolkits.natgrid import _natgrid
except ImportError:
raise RuntimeError(
"To use interp='nn' (Natural Neighbor interpolation) in "
"griddata, natgrid must be installed. Either install it "
"from http://github.com/matplotlib/natgrid or use "
"interp='linear' instead.")
if xi.ndim == 2:
# natgrid expects 1D xi and yi arrays.
xi = xi[0, :]
yi = yi[:, 0]
# Override default natgrid internal parameters.
_natgrid.seti(b'ext', 0)
_natgrid.setr(b'nul', np.nan)
if np.min(np.diff(xi)) < 0 or np.min(np.diff(yi)) < 0:
raise ValueError("Output grid defined by xi,yi must be monotone "
"increasing")
# Allocate array for output (buffer will be overwritten by natgridd)
zi = np.empty((yi.shape[0], xi.shape[0]), np.float64)
# Natgrid requires each array to be contiguous rather than e.g. a view
# that is a non-contiguous slice of another array. Use numpy.require
# to deal with this, which will copy if necessary.
x = np.require(x, requirements=['C'])
y = np.require(y, requirements=['C'])
z = np.require(z, requirements=['C'])
xi = np.require(xi, requirements=['C'])
yi = np.require(yi, requirements=['C'])
_natgrid.natgridd(x, y, z, xi, yi, zi)
# Mask points on grid outside convex hull of input data.
if np.any(np.isnan(zi)):
zi = np.ma.masked_where(np.isnan(zi), zi)
return zi
else:
# Linear interpolation performed using a matplotlib.tri.Triangulation
# and a matplotlib.tri.LinearTriInterpolator.
from .tri import Triangulation, LinearTriInterpolator
triang = Triangulation(x, y)
interpolator = LinearTriInterpolator(triang, z)
return interpolator(xi, yi)
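# Minimal sketch of the linear path documented above (avoids the optional
# natgrid dependency); the scattered sample points are made up for
# illustration.
def _griddata_example():
    rng = np.random.RandomState(0)
    x, y = rng.rand(50), rng.rand(50)
    z = x + y
    xi = np.linspace(0.1, 0.9, 5)
    yi = np.linspace(0.1, 0.9, 5)
    # zi is a 5x5 array; points outside the convex hull come back masked
    return griddata(x, y, z, xi, yi, interp='linear')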
##################################################
# Linear interpolation algorithms
##################################################
def less_simple_linear_interpolation(x, y, xi, extrap=False):
"""
This function provides simple (but somewhat less so than
:func:`cbook.simple_linear_interpolation`) linear interpolation.
    :func:`simple_linear_interpolation` will give a list of points
between a start and an end, while this does true linear
interpolation at an arbitrary set of points.
This is very inefficient linear interpolation meant to be used
only for a small number of points in relatively non-intensive use
cases. For real linear interpolation, use scipy.
"""
if cbook.is_scalar(xi):
xi = [xi]
x = np.asarray(x)
y = np.asarray(y)
xi = np.asarray(xi)
s = list(y.shape)
s[0] = len(xi)
yi = np.tile(np.nan, s)
for ii, xx in enumerate(xi):
bb = x == xx
if np.any(bb):
jj, = np.nonzero(bb)
yi[ii] = y[jj[0]]
elif xx < x[0]:
if extrap:
yi[ii] = y[0]
elif xx > x[-1]:
if extrap:
yi[ii] = y[-1]
else:
jj, = np.nonzero(x < xx)
jj = max(jj)
yi[ii] = y[jj] + (xx-x[jj])/(x[jj+1]-x[jj]) * (y[jj+1]-y[jj])
return yi
def slopes(x, y):
"""
:func:`slopes` calculates the slope *y*'(*x*)
The slope is estimated using the slope obtained from that of a
parabola through any three consecutive points.
This method should be superior to that described in the appendix
    of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman (Creative Computing July 1980) in at least one aspect:
Circles for interpolation demand a known aspect ratio between
*x*- and *y*-values. For many functions, however, the abscissa
are given in different dimensions, so an aspect ratio is
completely arbitrary.
The parabola method gives very similar results to the circle
method for most regular cases but behaves much better in special
cases.
    Norbert Nemec, Institute of Theoretical Physics, University of
    Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
    (inspired by an original implementation by Halldor Bjornsson,
Icelandic Meteorological Office, March 2006 halldor at vedur.is)
"""
# Cast key variables as float.
x = np.asarray(x, np.float_)
y = np.asarray(y, np.float_)
yp = np.zeros(y.shape, np.float_)
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
dydx = dy/dx
yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
return yp
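# Quick numerical check (illustrative): for y = x**2 on a uniform grid the
# parabola-based estimate reproduces the exact derivative 2*x at interior
# points, by construction.
def _slopes_example():
    x = np.linspace(0.0, 1.0, 5)
    return slopes(x, x ** 2)  # interior entries equal 2*x exactly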
def stineman_interp(xi, x, y, yp=None):
"""
Given data vectors *x* and *y*, the slope vector *yp* and a new
abscissa vector *xi*, the function :func:`stineman_interp` uses
Stineman interpolation to calculate a vector *yi* corresponding to
*xi*.
Here's an example that generates a coarse sine curve, then
interpolates over a finer abscissa::
x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
xi = linspace(0,2*pi,40);
yi = stineman_interp(xi,x,y,yp);
plot(x,y,'o',xi,yi)
The interpolation method is described in the article A
CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman. The article appeared in the July 1980 issue of
Creative Computing with a note from the editor stating that while
    they were "not an academic journal but once in a while something
    serious and original comes in", adding that this was "apparently a
    real solution" to a well-known problem.
For *yp* = *None*, the routine automatically determines the slopes
using the :func:`slopes` routine.
*x* is assumed to be sorted in increasing order.
For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine
tries an extrapolation. The relevance of the data obtained from
this, of course, is questionable...
    Original implementation by Halldor Bjornsson, Icelandic
    Meteorological Office, March 2006 halldor at vedur.is
    Completely reworked and optimized for Python by Norbert Nemec,
    Institute of Theoretical Physics, University of Regensburg, April
    2006 Norbert.Nemec at physik.uni-regensburg.de
"""
# Cast key variables as float.
x = np.asarray(x, np.float_)
y = np.asarray(y, np.float_)
if x.shape != y.shape:
raise ValueError("'x' and 'y' must be of same shape")
if yp is None:
yp = slopes(x, y)
else:
yp = np.asarray(yp, np.float_)
xi = np.asarray(xi, np.float_)
yi = np.zeros(xi.shape, np.float_)
# calculate linear slopes
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
s = dy/dx # note length of s is N-1 so last element is #N-2
# find the segment each xi is in
# this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or
# xi[j] > x[-1]
# the y-values that would come out from a linear interpolation:
sidx = s.take(idx)
xidx = x.take(idx)
yidx = y.take(idx)
xidxp1 = x.take(idx+1)
yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp
# using the yp slope of the left point
dy1 = (yp.take(idx) - sidx) * (xi - xidx)
# using the yp slope of the right point
dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1)
dy1dy2 = dy1*dy2
# The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand
# in Python
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
0.0,
1/(dy1+dy2),))
return yi
class GaussianKDE(object):
"""
Representation of a kernel-density estimate using Gaussian kernels.
Call signature::
kde = GaussianKDE(dataset, bw_method='silverman')
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a
callable, it should take a `GaussianKDE` instance as only
parameter and return a scalar. If None (default), 'scott' is used.
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
dim : int
Number of dimensions.
num_dp : int
Number of datapoints.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
kde.evaluate(points) : ndarray
Evaluate the estimated pdf on a provided set of points.
kde(points) : ndarray
Same as kde.evaluate(points)
"""
# This implementation with minor modification was too good to pass up.
# from scipy: https://github.com/scipy/scipy/blob/master/scipy/stats/kde.py
def __init__(self, dataset, bw_method=None):
self.dataset = np.atleast_2d(dataset)
if not np.array(self.dataset).size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.dim, self.num_dp = np.array(self.dataset).shape
isString = isinstance(bw_method, six.string_types)
if bw_method is None:
pass
elif (isString and bw_method == 'scott'):
self.covariance_factor = self.scotts_factor
elif (isString and bw_method == 'silverman'):
self.covariance_factor = self.silverman_factor
elif (np.isscalar(bw_method) and not isString):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
# Computes the covariance matrix for each Gaussian kernel using
# covariance_factor().
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self.data_covariance = np.atleast_2d(
np.cov(
self.dataset,
rowvar=1,
bias=False))
self.data_inv_cov = np.linalg.inv(self.data_covariance)
self.covariance = self.data_covariance * self.factor ** 2
self.inv_cov = self.data_inv_cov / self.factor ** 2
self.norm_factor = np.sqrt(
np.linalg.det(
2 * np.pi * self.covariance)) * self.num_dp
def scotts_factor(self):
return np.power(self.num_dp, -1. / (self.dim + 4))
def silverman_factor(self):
return np.power(
self.num_dp * (self.dim + 2.0) / 4.0, -1. / (self.dim + 4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different
than the dimensionality of the KDE.
"""
points = np.atleast_2d(points)
dim, num_m = np.array(points).shape
if dim != self.dim:
msg = "points have dimension %s, dataset has dimension %s" % (
dim, self.dim)
raise ValueError(msg)
result = np.zeros((num_m,), dtype=np.float)
if num_m >= self.num_dp:
# there are more points than data, so loop over data
for i in range(self.num_dp):
diff = self.dataset[:, i, np.newaxis] - points
tdiff = np.dot(self.inv_cov, diff)
energy = np.sum(diff * tdiff, axis=0) / 2.0
result = result + np.exp(-energy)
else:
# loop over points
for i in range(num_m):
diff = self.dataset - points[:, i, np.newaxis]
tdiff = np.dot(self.inv_cov, diff)
energy = np.sum(diff * tdiff, axis=0) / 2.0
result[i] = np.sum(np.exp(-energy), axis=0)
result = result / self.norm_factor
return result
__call__ = evaluate
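# Usage sketch mirroring the call signature documented above; the sample is
# synthetic standard-normal data.
def _gaussian_kde_example():
    data = np.random.RandomState(0).randn(200)
    kde = GaussianKDE(data, bw_method='silverman')
    xs = np.linspace(-3.0, 3.0, 101)
    return kde(xs)  # equivalent to kde.evaluate(xs)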
##################################################
# Code related to things in and around polygons
##################################################
def inside_poly(points, verts):
"""
*points* is a sequence of *x*, *y* points.
*verts* is a sequence of *x*, *y* vertices of a polygon.
Return value is a sequence of indices into points for the points
that are inside the polygon.
"""
# Make a closed polygon path
poly = Path(verts)
    # Check to see which points are contained within the Path
return [idx for idx, p in enumerate(points) if poly.contains_point(p)]
def poly_below(xmin, xs, ys):
"""
Given a sequence of *xs* and *ys*, return the vertices of a
polygon that has a horizontal base at *xmin* and an upper bound at
the *ys*. *xmin* is a scalar.
Intended for use with :meth:`matplotlib.axes.Axes.fill`, e.g.,::
xv, yv = poly_below(0, x, y)
ax.fill(xv, yv)
"""
if ma.isMaskedArray(xs) or ma.isMaskedArray(ys):
numpy = ma
else:
numpy = np
xs = numpy.asarray(xs)
ys = numpy.asarray(ys)
Nx = len(xs)
Ny = len(ys)
if Nx != Ny:
raise ValueError("'xs' and 'ys' must have the same length")
x = xmin*numpy.ones(2*Nx)
y = numpy.ones(2*Nx)
x[:Nx] = xs
y[:Nx] = ys
y[Nx:] = ys[::-1]
return x, y
def poly_between(x, ylower, yupper):
"""
Given a sequence of *x*, *ylower* and *yupper*, return the polygon
that fills the regions between them. *ylower* or *yupper* can be
scalar or iterable. If they are iterable, they must be equal in
length to *x*.
Return value is *x*, *y* arrays for use with
:meth:`matplotlib.axes.Axes.fill`.
"""
if (ma.isMaskedArray(ylower) or ma.isMaskedArray(yupper) or
ma.isMaskedArray(x)):
numpy = ma
else:
numpy = np
Nx = len(x)
if not cbook.iterable(ylower):
ylower = ylower*numpy.ones(Nx)
if not cbook.iterable(yupper):
yupper = yupper*numpy.ones(Nx)
x = numpy.concatenate((x, x[::-1]))
y = numpy.concatenate((yupper, ylower[::-1]))
return x, y
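# Sketch: vertices of the band between y = 0 and y = sin(x), ready for
# Axes.fill as described above.
def _poly_between_example():
    x = np.linspace(0.0, 2.0 * np.pi, 50)
    return poly_between(x, 0.0, np.sin(x))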
def is_closed_polygon(X):
"""
Tests whether first and last object in a sequence are the same. These are
presumably coordinates on a polygonal curve, in which case this function
tests if that curve is closed.
"""
return np.all(X[0] == X[-1])
def contiguous_regions(mask):
"""
return a list of (ind0, ind1) such that mask[ind0:ind1].all() is
True and we cover all such regions
"""
mask = np.asarray(mask, dtype=bool)
if not mask.size:
return []
# Find the indices of region changes, and correct offset
idx, = np.nonzero(mask[:-1] != mask[1:])
idx += 1
# List operations are faster for moderately sized arrays
idx = idx.tolist()
# Add first and/or last index if needed
if mask[0]:
idx = [0] + idx
if mask[-1]:
idx.append(len(mask))
return list(zip(idx[::2], idx[1::2]))
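# Worked example: for the mask below the True runs are indices [1, 3) and
# [4, 6), so the function returns [(1, 3), (4, 6)].
def _contiguous_regions_example():
    return contiguous_regions([False, True, True, False, True, True])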
def cross_from_below(x, threshold):
"""
return the indices into *x* where *x* crosses some threshold from
below, e.g., the i's where::
x[i-1]<threshold and x[i]>=threshold
Example code::
import matplotlib.pyplot as plt
t = np.arange(0.0, 2.0, 0.1)
s = np.sin(2*np.pi*t)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(t, s, '-o')
ax.axhline(0.5)
ax.axhline(-0.5)
ind = cross_from_below(s, 0.5)
ax.vlines(t[ind], -1, 1)
ind = cross_from_above(s, -0.5)
ax.vlines(t[ind], -1, 1)
plt.show()
.. seealso::
:func:`cross_from_above` and :func:`contiguous_regions`
"""
x = np.asarray(x)
ind = np.nonzero((x[:-1] < threshold) & (x[1:] >= threshold))[0]
if len(ind):
return ind+1
else:
return ind
def cross_from_above(x, threshold):
"""
    return the indices into *x* where *x* crosses some threshold from
    above, e.g., the i's where::
x[i-1]>threshold and x[i]<=threshold
.. seealso::
:func:`cross_from_below` and :func:`contiguous_regions`
"""
x = np.asarray(x)
ind = np.nonzero((x[:-1] >= threshold) & (x[1:] < threshold))[0]
if len(ind):
return ind+1
else:
return ind
##################################################
# Vector and path length geometry calculations
##################################################
def vector_lengths(X, P=2., axis=None):
"""
Finds the length of a set of vectors in *n* dimensions. This is
    like the :func:`numpy.linalg.norm` function for vectors, but has the ability to
work over a particular axis of the supplied array or matrix.
Computes ``(sum((x_i)^P))^(1/P)`` for each ``{x_i}`` being the
elements of *X* along the given axis. If *axis* is *None*,
compute over all elements of *X*.
"""
X = np.asarray(X)
return (np.sum(X**(P), axis=axis))**(1./P)
def distances_along_curve(X):
"""
    Computes the distance between a set of successive points in *N*
    dimensions, where *X* is an *M* x *N* array or matrix.  The distance
    between successive rows is the standard Euclidean distance.
"""
X = np.diff(X, axis=0)
return vector_lengths(X, axis=1)
def path_length(X):
"""
    Computes the distance travelled along a polygonal curve in *N*
    dimensions, where *X* is an *M* x *N* array or matrix.  Returns an array of
length *M* consisting of the distance along the curve at each point
(i.e., the rows of *X*).
"""
X = distances_along_curve(X)
return np.concatenate((np.zeros(1), np.cumsum(X)))
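# Worked example: cumulative arc length along three unit edges of the unit
# square, visiting corners in order; the result is [0., 1., 2., 3.].
def _path_length_example():
    X = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
    return path_length(X)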
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
"""
Converts a quadratic Bezier curve to a cubic approximation.
The inputs are the *x* and *y* coordinates of the three control
points of a quadratic curve, and the output is a tuple of *x* and
*y* coordinates of the four control points of the cubic curve.
"""
# TODO: Candidate for deprecation -- no longer used internally
# c0x, c0y = q0x, q0y
c1x, c1y = q0x + 2./3. * (q1x - q0x), q0y + 2./3. * (q1y - q0y)
c2x, c2y = c1x + 1./3. * (q2x - q0x), c1y + 1./3. * (q2y - q0y)
# c3x, c3y = q2x, q2y
return q0x, q0y, c1x, c1y, c2x, c2y, q2x, q2y
def offset_line(y, yerr):
"""
Offsets an array *y* by +/- an error and returns a tuple
(y - err, y + err).
The error term can be:
* A scalar. In this case, the returned tuple is obvious.
* A vector of the same length as *y*. The quantities y +/- err are computed
component-wise.
* A tuple of length 2. In this case, yerr[0] is the error below *y* and
yerr[1] is error above *y*. For example::
from pylab import *
x = linspace(0, 2*pi, num=100, endpoint=True)
y = sin(x)
y_minus, y_plus = mlab.offset_line(y, 0.1)
plot(x, y)
        fill_between(x, y_minus, y2=y_plus)
show()
"""
if cbook.is_numlike(yerr) or (cbook.iterable(yerr) and
len(yerr) == len(y)):
ymin = y - yerr
ymax = y + yerr
elif len(yerr) == 2:
ymin, ymax = y - yerr[0], y + yerr[1]
else:
raise ValueError("yerr must be scalar, 1xN or 2xN")
return ymin, ymax
|
rbalda/neural_ocr
|
env/lib/python2.7/site-packages/matplotlib/mlab.py
|
Python
|
mit
| 125,545
|
[
"Gaussian"
] |
60a82a91085c1a843251999696f8b93a5cf3c3d9d009e73bd2d99fca3bc420a8
|
# This file contains all the possible names used in toon name generation.
# Each name has a unique id and a category:
# 0 - boyTitle
# 1 - girlTitle
# 2 - neutralTitle
# 3 - boyFirst
# 4 - girlFirst
# 5 - neutralFirst
# 6 - capPrefix
# 7 - lastPrefix
# 8 - lastSuffix
TOONNAMES = '''0*0*Baron
1*0*Duke
2*0*King
3*0*Master
4*0*Mister
5*0*Prince
6*0*Sir
7*1*Lady
8*1*Miss
9*1*Princess
10*1*Queen
11*1*Granny
12*1*Aunt
13*2*Captain
14*2*Cool
15*2*Colonel
16*2*Crazy
17*2*Deputy
18*2*Dippy
19*2*Doctor
20*2*Fat
21*2*Good ol'
22*2*Little
23*2*Loopy
24*2*Loud
25*2*Noisy
26*2*Prof.
27*2*Sheriff
28*2*Skinny
29*2*Silly
30*2*Super
31*2*Ugly
32*2*Weird
33*3*Alvin
34*3*Astro
35*3*Barney
36*3*Bart
37*3*Beppo
38*3*Bert
39*3*Bonzo
40*3*Buford
41*3*Bunky
42*3*Buster
43*3*Butch
44*3*Buzz
45*3*Cecil
46*3*Chester
47*3*Chip
48*3*Chipper
49*3*Clancy
50*3*Clarence
51*3*Cliff
52*3*Clyde
53*3*Dudley
54*3*Duke
55*3*Ernie
56*3*Felix
57*3*Fritz
58*3*Graham
59*3*Harvey
60*3*Hector
61*3*Huey
62*3*Jacques
63*3*Jake
64*3*Knuckles
65*3*Lancelot
66*3*Leroy
67*3*Lionel
68*3*Lloyd
69*3*Louie
70*3*Mac
71*3*Max
72*3*Moe
73*3*Monty
74*3*Milton
75*3*Ned
76*3*Orville
77*3*Oscar
78*3*Oswald
79*3*Ozzie
80*3*Pierre
81*3*Reggie
82*3*Ricky
83*3*Rocco
84*3*Rollie
85*3*Romeo
86*3*Rusty
87*3*Sammie
88*3*Skip
89*3*Skipper
90*3*Skippy
91*3*Spike
92*3*Stinky
93*3*Teddy
94*3*Tom
95*3*Waldo
96*3*Wally
97*3*Wilbur
98*4*Bonnie
99*4*Bubbles
100*4*Candy
101*4*Clover
102*4*Cuddles
103*4*Daffodil
104*4*Daphne
105*4*Dee Dee
106*4*Dottie
107*4*Ginger
108*4*Gwen
109*4*Ladybug
110*4*Lily
111*4*Marigold
112*4*Maxie
113*4*Melody
114*4*Mo Mo
115*4*Nutmeg
116*4*Olive
117*4*Peaches
118*4*Pearl
119*4*Penny
120*4*Petunia
121*4*Rainbow
122*4*Raven
123*4*Robin
124*4*Rosie
125*4*Roxy
126*4*Sadie
127*4*Sally
128*4*Sandy
129*4*Taffy
130*4*Trixie
131*4*Ursula
132*4*Valentine
133*4*Violet
134*4*Vicky
135*4*Willow
136*5*B.D.
137*5*Banjo
138*5*Batty
139*5*Beany
140*5*Bebop
141*5*Bingo
142*5*Binky
143*5*Biscuit
144*5*Bongo
145*5*Boo Boo
146*5*Bonkers
147*5*Bouncey
148*5*Bizzy
149*5*Blinky
150*5*Bumpy
151*5*C.J.
152*5*C.W.
153*5*Chirpy
154*5*Chunky
155*5*Coconut
156*5*Comet
157*5*Corky
158*5*Corny
159*5*Cranky
160*5*Crazy
161*5*Cricket
162*5*Crumbly
163*5*Curly
164*5*Cuckoo
165*5*Daffy
166*5*Dinky
167*5*Dizzy
168*5*Domino
169*5*Drippy
170*5*Droopy
171*5*Dusty
172*5*Dynamite
173*5*Fancy
174*5*Fangs
175*5*Fireball
176*5*Fleabag
177*5*Flapjack
178*5*Flappy
179*5*Flip
180*5*Fluffy
181*5*Freckles
182*5*Frizzy
183*5*Furball
184*5*Goopy
185*5*Huddles
186*5*J.C.
187*5*Jazzy
188*5*Jellyroll
189*5*Kippy
190*5*Kit
192*5*Lollipop
193*5*Loony
194*5*Loopy
195*5*Lucky
196*5*Mildew
197*5*Murky
198*5*Nutty
199*5*Pancake
200*5*Peanut
201*5*Peppy
202*5*Pickles
203*5*Pinky
204*5*Popcorn
205*5*Poppy
206*5*Presto
207*5*Rhubarb
208*5*Salty
209*5*Scooter
210*5*Skids
211*5*Skimpy
212*5*Soupy
214*5*Slippy
215*5*Slumpy
216*5*Smirky
217*5*Snappy
218*5*Sniffy
219*5*Snuffy
220*5*Spiffy
221*5*Spotty
222*5*Spunky
223*5*Squeaky
224*5*Stripey
225*5*Star
226*5*Stubby
227*5*Tricky
228*5*Tubby
229*5*Von
230*5*Wacky
231*5*Wacko
232*5*Whiskers
234*5*Yippie
235*5*Z.Z.
236*5*Zany
237*5*Ziggy
238*5*Zilly
239*5*Zippy
240*5*Zippety
241*5*Zowie
242*6*Mc
243*6*Mac
244*7*Bagel
245*7*Banana
246*7*Bean
247*7*Beanie
248*7*Biggen
249*7*Bizzen
250*7*Blubber
251*7*Boingen
252*7*Bumber
253*7*Bumble
254*7*Bumpen
255*7*Cheezy
256*7*Crinkle
257*7*Crumble
258*7*Crunchen
259*7*Crunchy
260*7*Dandy
262*7*Dizzen
263*7*Dizzy
264*7*Doggen
265*7*Dyno
266*7*Electro
267*7*Feather
268*7*Fiddle
269*7*Frinkel
270*7*Fizzle
271*7*Flippen
272*7*Flipper
273*7*Fumble
274*7*Funny
275*7*Fuzzy
276*7*Giggle
277*7*Glitter
278*7*Google
279*7*Grumble
280*7*Gumdrop
281*7*Huckle
282*7*Hula
283*7*Jabber
284*7*Jeeper
285*7*Jinx
286*7*Jumble
287*7*Kooky
288*7*Lemon
289*7*Loopen
290*7*Mac
291*7*Mc
292*7*Mega
293*7*Mizzen
294*7*Nickel
295*7*Nutty
296*7*Octo
297*7*Paddle
298*7*Pale
299*7*Pedal
300*7*Pepper
301*7*Petal
302*7*Pickle
303*7*Pinker
304*7*Poodle
305*7*Precious
306*7*Pumpkin
307*7*Purple
308*7*Poppen
309*7*Rhino
310*7*Robo
311*7*Rocken
312*7*Ruffle
313*7*Smarty
314*7*Sniffle
315*7*Snorkel
316*7*Sour
317*7*Sparkle
318*7*Squiggle
319*7*Super
320*7*Spackle
321*7*Thunder
322*7*Toppen
323*7*Tricky
324*7*Tweedle
325*7*Twiddle
326*7*Twinkle
327*7*Wacky
328*7*Weasel
329*7*Whisker
330*7*Whistle
331*7*Wild
332*7*Witty
333*7*Wonder
334*7*Wrinkle
335*7*Ziller
336*7*Zippen
337*7*Zooble
338*8*bee
339*8*berry
340*8*blabber
341*8*bocker
342*8*boing
343*8*boom
344*8*bounce
345*8*bouncer
346*8*brains
347*8*bubble
348*8*bumble
349*8*bump
350*8*bumper
351*8*chomp
352*8*corn
353*8*crash
354*8*crumbs
355*8*crump
356*8*crunch
357*8*doodle
358*8*dorf
359*8*face
360*8*fidget
361*8*fish
362*8*flap
363*8*fuddy
364*8*flapper
365*8*fink
366*8*flinger
367*8*flip
368*8*flipper
369*8*foot
370*8*fussen
371*8*gadget
373*8*glop
374*8*gloop
375*8*goober
376*8*goose
377*8*grooven
378*8*hoffer
379*8*hopper
380*8*jinks
381*8*klunk
382*8*knees
383*8*marble
384*8*mash
385*8*monkey
386*8*mooch
387*8*mouth
388*8*muddle
389*8*muffin
390*8*mush
391*8*nerd
392*8*noodle
393*8*nose
394*8*nugget
395*8*phew
396*8*phooey
397*8*pocket
398*8*pop
399*8*pow
400*8*pretzel
401*8*pounce
402*8*poof
403*8*quack
404*8*roni
405*8*scooter
406*8*screech
407*8*smirk
408*8*snoop
409*8*snooker
410*8*snout
411*8*socks
412*8*speed
413*8*spinner
414*8*splat
415*8*sprinkles
416*8*sticks
417*8*stink
418*8*swirl
419*8*teeth
420*8*thud
421*8*toes
422*8*ton
423*8*toon
424*8*tooth
425*8*twist
426*8*whatsit
427*8*whip
428*8*wig
429*8*woof
430*8*zaner
431*8*zap
432*8*zapper
433*8*zilla
434*8*zoom
435*8*burger
455*8*gabber
456*7*Tinker
457*8*sparkles
459*8*zoop
460*8*bop
461*8*squeak
467*8*glow
470*8*wicket
471*7*Slimey
472*8*son
474*8*beep
475*7*Flower
477*5*Miles
478*5*Tegan
479*5*Giggles
480*5*Twister
481*5*Funky
482*3*Garfield
484*5*Winnie
486*4*Darla
487*5*Hoppy
488*5*Pebbles
490*3*Billy
491*5*Finn
492*5*Flint
494*4*Veronica
495*4*Beatrix
496*1*Madame
501*2*Daring
505*3*Arnold
506*3*Albert
507*3*Casper
508*3*Bruce
514*5*Grouchy
516*3*Leonardo
521*3*Olaf
522*5*Gale
524*3*Curt
525*4*Tutu
529*4*Tammy
530*5*Midge
531*5*Jay
532*5*Punchy
533*4*Rose
534*3*Bob
538*4*Kiki
539*4*Mitzi
540*4*Patty
542*7*Swinkle
543*8*batch
544*4*Becky
547*4*Molly
548*4*Flora
550*5*Cookie
552*3*Rodney
556*4*Velma
557*3*Hans
558*3*Elmer
559*3*Roscoe
560*4*Sylvia
562*4*Astrid
564*5*Rory
565*4*Penelope
567*4*Bella
568*4*Octavia
569*4*Aurora
570*3*Tex
571*4*Lucy
572*5*Truffles
573*5*Pippy
574*5*Bonbon
575*5*Dot
576*4*Ruby
577*5*Hazel
578*4*Carol
581*4*Whitney
583*2*Chief
584*4*Duchess
588*2*Grumpy
589*2*Grand ol'
590*0*Count
595*0*Mr.
596*1*Mrs.
610*2*Coach
611*2*Dr.
617*2*Cap'n
625*0*Sergeant
641*3*Jack
642*4*Jackie
643*4*Angel
644*4*Claire
656*8*loose
657*8*loop
660*7*Fluffen
661*8*fluff
663*7*Pillow
664*8*paws
665*7*Honey
668*7*Jelly
669*7*Jiggle
676*8*wiggle
677*8*wire
681*3*Bentley
684*3*Maxwell
687*3*Wesley
691*3*Phil
692*4*Zaza
696*7*Razzle
697*8*dazzle
703*3*Poe
704*8*sprocket
717*8*butter
718*7*Nutter
720*8*song
725*2*Chef
728*4*April
734*3*Rover
741*3*Harry
752*3*Leo
754*4*Joyce
755*4*Alice
758*4*Abigail
760*4*Patsy
761*4*Clara
763*4*Mabel
773*4*Susan
775*4*Barbara
777*3*Roger
781*4*Jenny
785*3*Gary
789*4*Olivia
790*4*Maggie
791*4*Lulu
793*7*Cuddle
821*6*O'
834*7*Frazzle
838*5*Yappy
842*5*Zoinks
848*5*Boots
849*5*Midnight
858*4*Bridget
861*4*Jade
862*3*John
862*3*Johnny
863*3*Dave
864*3*David
865*3*Davey
866*4*Rosey
871*7*Barnacle
873*3*Gus
878*5*Sunny
879*4*Sunshine
882*5*Chewy
894*4*Wendy
901*7*Jingle
903*7*Jiffy
904*3*Jester
905*4*Nelly
906*5*P.J.
910*5*J.J.
917*5*Sneezy
918*5*Smudge
921*7*Snaggle
922*4*Sassy
925*2*Judge
945*4*Holly
964*2*Lucky
968*8*whirl
969*8*grump
982*7*Riddle
984*8*tail
987*8*masher
989*3*William
1000*8*grin
1004*8*mew
1010*5*Pepper
1012*8*melon
1016*3*Gulliver
1024*4*Mary
1030*3*Jimmy
1032*3*Jonah
1038*7*Laffen
1063*1*Fancy
1088*3*Bud'''
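# Hedged helper sketch (not part of the original file): parse the TOONNAMES
# table into (id, category, name) tuples, following the 'id*category*name'
# layout described in the header comment.
def parseToonNames(raw=TOONNAMES):
    entries = []
    for line in raw.splitlines():
        nameId, category, name = line.split('*', 2)
        entries.append((int(nameId), int(category), name))
    return entries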
|
silly-wacky-3-town-toon/SOURCE-COD
|
toontown/makeatoon/ToonNamesEnglish.py
|
Python
|
apache-2.0
| 8,178
|
[
"MOE",
"TINKER"
] |
efc8ba507015847dda5bc65075c7e8134d103046f4bd79d9280bc44348d3de64
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
class ThermoDB(object):
"""
Class containing data objects for a thermodynamic database
for geochemical modelling
"""
def __init__(self):
self._format = None
self._filename = None
self._header = None
self._activity_model = None
self._fugacity_model = None
self._logk_model = None
self._logk_model_eqn = None
self._temperatures = None
self._pressures = None
self._elements = None
self._basis_species = None
self._secondary_species = None
self._free_electron = None
self._mineral_species = None
self._gas_species = None
self._redox_couples = None
self._adh = None
self._bdh = None
self._bdot = None
self._neutral_species = None
self._sorbing_minerals = None
self._surface_species = None
# Original DB format
@property
def format(self):
return self._format
@format.setter
def format(self, format):
self._format = format
# Original DB header information
@property
def header(self):
return self._header
@header.setter
def header(self, header):
self._header = header
# Original DB activity model
@property
def activity_model(self):
return self._activity_model
@activity_model.setter
def activity_model(self, activity_model):
self._activity_model = activity_model
# Original DB fugacity model
@property
def fugacity_model(self):
return self._fugacity_model
@fugacity_model.setter
def fugacity_model(self, fugacity_model):
self._fugacity_model = fugacity_model
# Original DB equilibrium constant model
@property
def logk_model(self):
return self._logk_model
@logk_model.setter
def logk_model(self, logk_model):
self._logk_model = logk_model
# Original DB equilibrium constant model in equation form
@property
def logk_model_eqn(self):
return self._logk_model_eqn
@logk_model_eqn.setter
def logk_model_eqn(self, logk_model_eqn):
self._logk_model_eqn = logk_model_eqn
# Original DB temperature points
# (for reaction equilibrium constants)
@property
def temperatures(self):
return self._temperatures
@temperatures.setter
def temperatures(self, temperatures):
self._temperatures = temperatures
# Original DB pressure points
@property
def pressures(self):
return self._pressures
@pressures.setter
def pressures(self, pressures):
self._pressures = pressures
# Element data
@property
def elements(self):
return self._elements
@elements.setter
def elements(self, elements):
self._elements = elements
# Basis species data
@property
def basis_species(self):
return self._basis_species
@basis_species.setter
def basis_species(self, basis_species):
self._basis_species = basis_species
# Secondary species data
@property
def secondary_species(self):
return self._secondary_species
@secondary_species.setter
def secondary_species(self, secondary_species):
self._secondary_species = secondary_species
# Free electron data
@property
def free_electron(self):
return self._free_electron
@free_electron.setter
def free_electron(self, free_electron):
self._free_electron = free_electron
# Mineral species data
@property
def mineral_species(self):
return self._mineral_species
@mineral_species.setter
def mineral_species(self, mineral_species):
self._mineral_species = mineral_species
# Gas species data
@property
def gas_species(self):
return self._gas_species
@gas_species.setter
def gas_species(self, gas_species):
self._gas_species = gas_species
# Redox couples data
@property
def redox_couples(self):
return self._redox_couples
@redox_couples.setter
def redox_couples(self, redox_couples):
self._redox_couples = redox_couples
# Debye-Huckel a data
@property
def adh(self):
return self._adh
@adh.setter
def adh(self, adh):
self._adh = adh
# Debye-Huckel b data
@property
def bdh(self):
return self._bdh
@bdh.setter
def bdh(self, bdh):
self._bdh = bdh
# Debye-Huckel bdot data
@property
def bdot(self):
return self._bdot
@bdot.setter
def bdot(self, bdot):
self._bdot = bdot
# Neutral species data
@property
def neutral_species(self):
return self._neutral_species
@neutral_species.setter
def neutral_species(self, neutral_species):
self._neutral_species = neutral_species
# Sorbing minerals
@property
def sorbing_minerals(self):
return self._sorbing_minerals
@sorbing_minerals.setter
def sorbing_minerals(self, sorbing_minerals):
self._sorbing_minerals = sorbing_minerals
# surface species
@property
def surface_species(self):
return self._surface_species
@surface_species.setter
def surface_species(self, surface_species):
self._surface_species = surface_species
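# Minimal usage sketch (values are illustrative, not taken from a real
# geochemical database): populate a ThermoDB via its property setters and
# read the data back.
if __name__ == '__main__':
    db = ThermoDB()
    db.format = 'gwb'
    db.temperatures = [0, 25, 60, 100, 150, 200, 250, 300]
    db.elements = {'Ag': {'name': 'Silver', 'molecular weight': 107.8682}}
    print(db.format, db.temperatures, sorted(db.elements))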
|
nuclear-wizard/moose
|
modules/geochemistry/python/dbclass.py
|
Python
|
lgpl-2.1
| 5,668
|
[
"MOOSE"
] |
64a8bc63d23cf24ad3103f5d5ae8266645c992e7378b54a7413500a52b8a9fba
|
#! /usr/bin/env python3
import os
import matplotlib.pyplot as plt
import galore
import galore.formats
import galore.plot
from tempfile import mkstemp
fd, tmp = mkstemp(suffix='.txt', text=True)
os.close(fd)
sim_xvals = [10, 15, 19.5, 20, 20.5, 21]
sim_yvals = [12, 6, 3, 2, 1, 2]
galore.formats.write_txt(sim_xvals, sim_yvals, filename=tmp)
fig = plt.figure(figsize=(4,2))
ax1 = fig.add_subplot(1, 2, 1)
for x, y in zip(sim_xvals, sim_yvals):
ax1.plot([x, x], [0, y], 'k-')
ax1.set_xticklabels([])
ax1.set_yticklabels([])
ax1.set_title("Ideal peaks")
x, y = galore.process_1d_data(input=tmp, gaussian=2)
ax2 = fig.add_subplot(1, 2, 2, sharey=ax1)
ax2.plot(x, y, 'k-')
ax2.set_ylim((0, None))
ax2.set_xticklabels([])
ax2.set_yticklabels([])
ax2.set_title("With broadening")
fig.savefig('docs/source/figures/ir_schematic.pdf')
|
SMTG-UCL/galore
|
docs/source/figures/ir_schematic.py
|
Python
|
gpl-3.0
| 840
|
[
"Gaussian"
] |
c6d0ebd5c6330f63803687b7e2ff8cdf107dfbc63a255847ee615e285e6653f0
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Generate symmetry adapted basis
'''
from functools import reduce
import numpy
from pyscf.data.elements import _symbol, _rm_digit
from pyscf.symm import geom
from pyscf.symm import param
__all__ = ['tot_parity_odd',
'symm_adapted_basis',
'dump_symm_adapted_basis',
'symmetrize_matrix',
'linearmole_symm_descent',
'linearmole_irrep_symb2id',
'linearmole_irrep_id2symb',
'linearmole_symm_adapted_basis',
]
OP_PARITY_ODD = {
'E' : (0, 0, 0),
'C2x': (0, 1, 1),
'C2y': (1, 0, 1),
'C2z': (1, 1, 0),
'i' : (1, 1, 1),
'sx' : (1, 0, 0),
'sy' : (0, 1, 0),
'sz' : (0, 0, 1),
}
def tot_parity_odd(op, l, m):
if op == 'E':
return 0
else:
ox,oy,oz = OP_PARITY_ODD[op]
gx,gy,gz = param.SPHERIC_GTO_PARITY_ODD[l][l+m]
return (ox and gx)^(oy and gy)^(oz and gz)
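# Illustrative call: whether the l=1, m=+1 spherical GTO flips sign under
# the sigma_z mirror; the answer is read off param.SPHERIC_GTO_PARITY_ODD,
# so it depends on that table's ordering convention.
def _tot_parity_example():
    return tot_parity_odd('sz', 1, 1)  # 0 means even (no sign flip)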
def symm_adapted_basis(mol, gpname, orig=0, coordinates=None):
if gpname in ('Dooh', 'Coov'):
return linearmole_symm_adapted_basis(mol, gpname, orig, coordinates)
# prop_atoms are the atoms relocated wrt the charge center with proper
# orientation
if coordinates is None:
coordinates = numpy.eye(3)
prop_atoms = mol.format_atom(mol._atom, orig, coordinates, 'Bohr')
eql_atom_ids = geom.symm_identical_atoms(gpname, prop_atoms)
ops = numpy.asarray([param.D2H_OPS[op] for op in param.OPERATOR_TABLE[gpname]])
chartab = numpy.array([x[1:] for x in param.CHARACTER_TABLE[gpname]])
    nirrep = len(chartab)
aoslice = mol.aoslice_by_atom()
nao = mol.nao_nr()
atom_coords = numpy.array([a[1] for a in prop_atoms])
sodic = [[] for i in range(8)]
for atom_ids in eql_atom_ids:
r0 = atom_coords[atom_ids[0]]
op_coords = numpy.einsum('x,nxy->ny', r0, ops)
# Using ops to generate other atoms from atom_ids[0]
coords0 = atom_coords[atom_ids]
natm = len(atom_ids)
dc = abs(op_coords.reshape(-1,1,3) - coords0).max(axis=2)
op_relate_idx = numpy.argwhere(dc < geom.TOLERANCE)[:,1]
ao_loc = numpy.array([aoslice[atom_ids[i],2] for i in op_relate_idx])
b0, b1 = aoslice[atom_ids[0],:2]
ip = 0
for ib in range(b0, b1):
l = mol.bas_angular(ib)
if mol.cart:
degen = (l + 1) * (l + 2) // 2
cbase = numpy.zeros((degen,nirrep,nao))
for op_id, op in enumerate(ops):
n = 0
for x in range(l, -1, -1):
for y in range(l-x, -1, -1):
z = l-x-y
idx = ao_loc[op_id] + n
sign = op[0,0]**x * op[1,1]**y * op[2,2]**z
cbase[n,:,idx] += sign * chartab[:,op_id]
n += 1
else:
degen = l * 2 + 1
cbase = numpy.zeros((degen,nirrep,nao))
for op_id, op in enumerate(param.OPERATOR_TABLE[gpname]):
for n, m in enumerate(range(-l, l+1)):
idx = ao_loc[op_id] + n
if tot_parity_odd(op, l, m):
cbase[n,:,idx] -= chartab[:,op_id]
else:
cbase[n,:,idx] += chartab[:,op_id]
norms = numpy.sqrt(numpy.einsum('mij,mij->mi', cbase, cbase))
for i in range(mol.bas_nctr(ib)):
for n, ir in numpy.argwhere(norms > 1e-12):
c = numpy.zeros(nao)
c[ip:] = cbase[n,ir,:nao-ip] / norms[n,ir]
sodic[ir].append(c)
ip += degen
ao_loc = mol.ao_loc_nr()
l_idx = {}
ANG_OF = 1
for l in range(mol._bas[:,ANG_OF].max()+1):
idx = [numpy.arange(ao_loc[ib], ao_loc[ib+1])
for ib in numpy.where(mol._bas[:,ANG_OF] == l)[0]]
if idx:
l_idx[l] = numpy.hstack(idx)
Ds = _ao_rotation_matrices(mol, coordinates)
so = []
irrep_ids = []
for ir, c in enumerate(sodic):
if len(c) > 0:
irrep_ids.append(ir)
c_ir = numpy.vstack(c).T
nso = c_ir.shape[1]
for l, idx in l_idx.items():
c = c_ir[idx].reshape(-1,Ds[l].shape[1],nso)
c_ir[idx] = numpy.einsum('nm,smp->snp', Ds[l], c).reshape(-1,nso)
so.append(c_ir)
return so, irrep_ids
def _ao_rotation_matrices(mol, axes):
'''Cache the rotation matrices'''
from pyscf import lib
from pyscf.symm.Dmatrix import Dmatrix, get_euler_angles
alpha, beta, gamma = get_euler_angles(numpy.eye(3), axes)
ANG_OF = 1
l_max = mol._bas[:,ANG_OF].max()
if not mol.cart:
return [Dmatrix(l, alpha, beta, gamma, reorder_p=True)
for l in range(l_max+1)]
pp = Dmatrix(1, alpha, beta, gamma, reorder_p=True)
Ds = [numpy.ones((1,1))]
for l in range(1, l_max+1):
# All possible x,y,z combinations
cidx = numpy.sort(lib.cartesian_prod([(0, 1, 2)] * l), axis=1)
addr = 0
affine = numpy.ones((1,1))
for i in range(l):
nd = affine.shape[0] * 3
affine = numpy.einsum('ik,jl->ijkl', affine, pp).reshape(nd, nd)
addr = addr * 3 + cidx[:,i]
uniq_addr, rev_addr = numpy.unique(addr, return_inverse=True)
ncart = (l + 1) * (l + 2) // 2
assert ncart == uniq_addr.size
trans = numpy.zeros((ncart,ncart))
for i, k in enumerate(rev_addr):
trans[k] += affine[i,uniq_addr]
Ds.append(trans)
return Ds
def dump_symm_adapted_basis(mol, so):
raise RuntimeError('TODO')
def symmetrize_matrix(mat, so):
return [reduce(numpy.dot, (c.conj().T,mat,c)) for c in so]
def _basis_offset_for_atoms(atoms, basis_tab):
basoff = [0]
n = 0
for at in atoms:
symb = _symbol(at[0])
if symb in basis_tab:
bas0 = basis_tab[symb]
else:
bas0 = basis_tab[_rm_digit(symb)]
for b in bas0:
angl = b[0]
n += _num_contract(b) * (angl*2+1)
basoff.append(n)
return n, basoff
def _num_contract(basis):
if isinstance(basis[1], int):
        # This branch should never be reached if basis_tab is formatted by function mole.format_basis
nctr = len(basis[2]) - 1
else:
nctr = len(basis[1]) - 1
return nctr
###############################
# Linear molecule
# Irreps ID maps
# Dooh -> D2h | Coov -> C2v
# A1g 0 Ag 0 | A1 0 A1 0
# A2g 1 B1g 1 | A2 1 A2 1
# A1u 5 B1u 5 | E1x 2 B1 2
# A2u 4 Au 4 | E1y 3 B2 3
# E1gx 2 B2g 2 | E2x 10 A1 0
# E1gy 3 B3g 3 | E2y 11 A2 1
# E1ux 7 B3u 7 | E3x 12 B1 2
# E1uy 6 B2u 6 | E3y 13 B2 3
# E2gx 10 Ag 0 | E4x 20 A1 0
# E2gy 11 B1g 1 | E4y 21 A2 1
# E2ux 15 B1u 5 | E5x 22 B1 2
# E2uy 14 Au 4 | E5y 23 B2 3
# E3gx 12 B2g 2 |
# E3gy 13 B3g 3 |
# E3ux 17 B3u 7 |
# E3uy 16 B2u 6 |
# E4gx 20 Ag 0 |
# E4gy 21 B1g 1 |
# E4ux 25 B1u 5 |
# E4uy 24 Au 4 |
# E5gx 22 B2g 2 |
# E5gy 23 B3g 3 |
# E5ux 27 B3u 7 |
# E5uy 26 B2u 6 |
DOOH_IRREP_ID_TABLE = {
'A1g' : 0,
'A2g' : 1,
'A1u' : 5,
'A2u' : 4,
'E1gx': 2,
'E1gy': 3,
'E1ux': 7,
'E1uy': 6,
'_evengx': 0,
'_evengy': 1,
'_evenux': 5,
'_evenuy': 4,
'_oddgx': 2,
'_oddgy': 3,
'_oddux': 7,
'_odduy': 6,
}
COOV_IRREP_ID_TABLE = {
'A1' : 0,
'A2' : 1,
'E1x': 2,
'E1y': 3,
'_evenx': 0,
'_eveny': 1,
'_oddx': 2,
'_oddy': 3,
}
def linearmole_symm_descent(gpname, irrepid):
'''Map irreps to D2h or C2v'''
if gpname in ('Dooh', 'Coov'):
return irrepid % 10
else:
raise RuntimeError('%s is not proper for linear molecule.' % gpname)
def linearmole_irrep_symb2id(gpname, symb):
if gpname == 'Dooh':
if symb in DOOH_IRREP_ID_TABLE:
return DOOH_IRREP_ID_TABLE[symb]
else:
n = int(''.join([i for i in symb if i.isdigit()]))
if n % 2:
return (n//2)*10 + DOOH_IRREP_ID_TABLE['_odd'+symb[-2:]]
else:
return (n//2)*10 + DOOH_IRREP_ID_TABLE['_even'+symb[-2:]]
elif gpname == 'Coov':
if symb in COOV_IRREP_ID_TABLE:
return COOV_IRREP_ID_TABLE[symb]
else:
n = int(''.join([i for i in symb if i.isdigit()]))
if n % 2:
return (n//2)*10 + COOV_IRREP_ID_TABLE['_odd'+symb[-1]]
else:
return (n//2)*10 + COOV_IRREP_ID_TABLE['_even'+symb[-1]]
else:
raise RuntimeError('%s is not proper for linear molecule.' % gpname)
DOOH_IRREP_SYMBS = ('A1g' , 'A2g' , 'E1gx', 'E1gy' , 'A2u', 'A1u' , 'E1uy', 'E1ux')
DOOH_IRREP_SYMBS_EXT = ('gx' , 'gy' , 'gx', 'gy' , 'uy', 'ux' , 'uy', 'ux')
COOV_IRREP_SYMBS = ('A1' , 'A2' , 'E1x', 'E1y')
def linearmole_irrep_id2symb(gpname, irrep_id):
if gpname == 'Dooh':
if irrep_id < 10:
return DOOH_IRREP_SYMBS[irrep_id]
else:
n = irrep_id % 10
m = irrep_id // 10
if n in (0, 1, 5, 4):
rn = m*2
else:
rn = m*2+1
return 'E%d%s' % (rn, DOOH_IRREP_SYMBS_EXT[n])
elif gpname == 'Coov':
if irrep_id < 10:
return COOV_IRREP_SYMBS[irrep_id]
else:
n = irrep_id % 10
m = irrep_id // 10
if n < 2:
rn = m*2
else:
rn = m*2+1
if n % 2:
xy = 'y'
else:
xy = 'x'
return 'E%d%s' % (rn, xy)
else:
raise RuntimeError('%s is not proper for linear molecule.' % gpname)
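# Round-trip sketch for the two maps above, using the Dooh table in the
# comment block: E2gx carries irrep id 10.
def _linearmole_irrep_example():
    assert linearmole_irrep_symb2id('Dooh', 'E2gx') == 10
    assert linearmole_irrep_id2symb('Dooh', 10) == 'E2gx'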
def linearmole_symm_adapted_basis(mol, gpname, orig=0, coordinates=None):
assert(gpname in ('Dooh', 'Coov'))
assert(not mol.cart)
if coordinates is None:
coordinates = numpy.eye(3)
prop_atoms = mol.format_atom(mol._atom, orig, coordinates, 'Bohr')
eql_atom_ids = geom.symm_identical_atoms(gpname, prop_atoms)
aoslice = mol.aoslice_by_atom()
basoff = aoslice[:,2]
nao = mol.nao_nr()
sodic = {}
shalf = numpy.sqrt(.5)
def plus(i0, i1):
c = numpy.zeros(nao)
c[i0] = c[i1] = shalf
return c
def minus(i0, i1):
c = numpy.zeros(nao)
c[i0] = shalf
c[i1] =-shalf
return c
def identity(i0):
c = numpy.zeros(nao)
c[i0] = 1
return c
def add_so(irrep_name, c):
if irrep_name in sodic:
sodic[irrep_name].append(c)
else:
sodic[irrep_name] = [c]
if gpname == 'Dooh':
for atom_ids in eql_atom_ids:
if len(atom_ids) == 2:
at0 = atom_ids[0]
at1 = atom_ids[1]
ip = 0
b0, b1, p0, p1 = aoslice[at0]
for ib in range(b0, b1):
angl = mol.bas_angular(ib)
nc = mol.bas_nctr(ib)
degen = angl * 2 + 1
if angl == 1:
for i in range(nc):
aoff = ip + i*degen + angl
# m = 0
idx0 = basoff[at0] + aoff + 1
idx1 = basoff[at1] + aoff + 1
add_so('A1g', minus(idx0, idx1))
add_so('A1u', plus (idx0, idx1))
# m = +/- 1
idx0 = basoff[at0] + aoff - 1
idy0 = basoff[at0] + aoff
idx1 = basoff[at1] + aoff - 1
idy1 = basoff[at1] + aoff
add_so('E1ux', plus (idx0, idx1))
add_so('E1uy', plus (idy0, idy1))
add_so('E1gx', minus(idx0, idx1))
add_so('E1gy', minus(idy0, idy1))
else:
for i in range(nc):
aoff = ip + i*degen + angl
# m = 0
idx0 = basoff[at0] + aoff
idx1 = basoff[at1] + aoff
if angl % 2: # p-sigma, f-sigma
add_so('A1g', minus(idx0, idx1))
add_so('A1u', plus (idx0, idx1))
else: # s-sigma, d-sigma
add_so('A1g', plus (idx0, idx1))
add_so('A1u', minus(idx0, idx1))
# +/-m
for m in range(1,angl+1):
idx0 = basoff[at0] + aoff + m
idy0 = basoff[at0] + aoff - m
idx1 = basoff[at1] + aoff + m
idy1 = basoff[at1] + aoff - m
if angl % 2: # odd parity
add_so('E%dux'%m, plus (idx0, idx1))
add_so('E%duy'%m, plus (idy0, idy1))
add_so('E%dgx'%m, minus(idx0, idx1))
add_so('E%dgy'%m, minus(idy0, idy1))
else:
add_so('E%dgy'%m, plus (idy0, idy1))
add_so('E%dgx'%m, plus (idx0, idx1))
add_so('E%duy'%m, minus(idy0, idy1))
add_so('E%dux'%m, minus(idx0, idx1))
ip += nc * degen
elif len(atom_ids) == 1:
at0 = atom_ids[0]
ip = 0
b0, b1, p0, p1 = aoslice[at0]
for ib in range(b0, b1):
angl = mol.bas_angular(ib)
nc = mol.bas_nctr(ib)
degen = angl * 2 + 1
if angl == 1:
for i in range(nc):
aoff = ip + i*degen + angl
# m = 0
idx0 = basoff[at0] + aoff + 1
add_so('A1u', identity(idx0))
# m = +/- 1
idx0 = basoff[at0] + aoff - 1
idy0 = basoff[at0] + aoff
add_so('E1uy', identity(idy0))
add_so('E1ux', identity(idx0))
else:
for i in range(nc):
aoff = ip + i*degen + angl
idx0 = basoff[at0] + aoff
# m = 0
if angl % 2:
add_so('A1u', identity(idx0))
else:
add_so('A1g', identity(idx0))
# +/-m
for m in range(1,angl+1):
idx0 = basoff[at0] + aoff + m
idy0 = basoff[at0] + aoff - m
if angl % 2: # p, f functions
add_so('E%dux'%m, identity(idx0))
add_so('E%duy'%m, identity(idy0))
else: # d, g functions
add_so('E%dgy'%m, identity(idy0))
add_so('E%dgx'%m, identity(idx0))
ip += nc * degen
elif gpname == 'Coov':
for atom_ids in eql_atom_ids:
at0 = atom_ids[0]
ip = 0
b0, b1, p0, p1 = aoslice[at0]
for ib in range(b0, b1):
angl = mol.bas_angular(ib)
nc = mol.bas_nctr(ib)
degen = angl * 2 + 1
if angl == 1:
for i in range(nc):
aoff = ip + i*degen + angl
# m = 0
idx0 = basoff[at0] + aoff + 1
add_so('A1', identity(idx0))
# m = +/- 1
idx0 = basoff[at0] + aoff - 1
idy0 = basoff[at0] + aoff
add_so('E1x', identity(idx0))
add_so('E1y', identity(idy0))
else:
for i in range(nc):
aoff = ip + i*degen + angl
idx0 = basoff[at0] + aoff
# m = 0
add_so('A1', identity(idx0))
# +/-m
for m in range(1,angl+1):
idx0 = basoff[at0] + aoff + m
idy0 = basoff[at0] + aoff - m
add_so('E%dx'%m, identity(idx0))
add_so('E%dy'%m, identity(idy0))
ip += nc * degen
irrep_ids = []
irrep_names = list(sodic.keys())
for irname in irrep_names:
irrep_ids.append(linearmole_irrep_symb2id(gpname, irname))
irrep_idx = numpy.argsort(irrep_ids)
irrep_ids = [irrep_ids[i] for i in irrep_idx]
ao_loc = mol.ao_loc_nr()
l_idx = {}
ANG_OF = 1
for l in range(mol._bas[:,ANG_OF].max()+1):
idx = [numpy.arange(ao_loc[ib], ao_loc[ib+1])
for ib in numpy.where(mol._bas[:,ANG_OF] == l)[0]]
if idx:
l_idx[l] = numpy.hstack(idx)
Ds = _ao_rotation_matrices(mol, coordinates)
so = []
for i in irrep_idx:
c_ir = numpy.vstack(sodic[irrep_names[i]]).T
nso = c_ir.shape[1]
for l, idx in l_idx.items():
c = c_ir[idx].reshape(-1,Ds[l].shape[1],nso)
c_ir[idx] = numpy.einsum('nm,smp->snp', Ds[l], c).reshape(-1,nso)
so.append(c_ir)
return so, irrep_ids
if __name__ == "__main__":
from pyscf import gto
h2o = gto.Mole()
h2o.verbose = 0
h2o.output = None
h2o.atom = [['O' , (1. , 0. , 0. ,)],
[1 , (0. , -.757 , 0.587,)],
[1 , (0. , 0.757 , 0.587,)] ]
h2o.basis = {'H': 'cc-pvdz',
'O': 'cc-pvdz',}
h2o.build()
gpname, origin, axes = geom.detect_symm(h2o._atom)
atoms = gto.format_atom(h2o._atom, origin, axes)
h2o.build(False, False, atom=atoms)
print(gpname)
eql_atoms = geom.symm_identical_atoms(gpname, atoms)
print(symm_adapted_basis(h2o, gpname, eql_atoms)[1])
mol = gto.M(
atom = [['H', (0,0,0)], ['H', (0,0,-1)], ['H', (0,0,1)]],
basis = 'ccpvtz', charge=1)
gpname, orig, axes = geom.detect_symm(mol._atom)
atoms = gto.format_atom(mol._atom, orig, axes)
mol.build(False, False, atom=atoms)
print(gpname)
eql_atoms = geom.symm_identical_atoms(gpname, atoms)
print(symm_adapted_basis(mol, gpname, eql_atoms)[1])
mol = gto.M(
atom = [['H', (0,0,0)], ['H', (0,0,-1)], ['He', (0,0,1)]],
basis = 'ccpvtz')
gpname, orig, axes = geom.detect_symm(mol._atom)
atoms = gto.format_atom(mol._atom, orig, axes)
mol.build(False, False, atom=atoms)
print(gpname)
eql_atoms = geom.symm_identical_atoms(gpname, atoms)
print(symm_adapted_basis(mol, gpname, eql_atoms)[1])
|
gkc1000/pyscf
|
pyscf/symm/basis.py
|
Python
|
apache-2.0
| 20,142
|
[
"PySCF"
] |
d37551cf12c3953ee28c91b706532314cfd6900c4581d365a9ef1b7d545303a2
|
import requests
from Firefly import logging, scheduler
from Firefly.const import AUTHOR
from Firefly.helpers.device import *
from Firefly.helpers.device.device import Device
from Firefly.helpers.metadata.metadata import ColorMap, action_text
from .foobot_service import STATUS_URL
TITLE = 'Foobot Air Sensor'
DEVICE_TYPE = 'air_sensor'
REQUESTS = ['air_quality', 'pm', 'temperature', 'humidity', 'c02', 'voc', 'allpollu']
COMMANDS = ['set_temp_scale']
INITIAL_VALUES = {
'_air_quality': 'unknown',
'_pm': -1,
'_temperature': -1.0,
'_humidity': -1,
'_c02': -1,
'_voc': -1,
'_allpollu': -1,
'_temp_scale': 'f',
'_refresh_interval': 15
}
SCORE_MAP = {
0: 'great',
1: 'good',
2: 'fair',
3: 'poor',
100: 'unknown'
}
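# The overall air quality reported below is the worst (max) of the pm, voc
# and c02 sub-scores, mapped through SCORE_MAP; e.g. sub-scores (0, 1, 2)
# yield 'fair'.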
'''
Sample response:
{
"uuid": "XXXXXXXXXX",
"start": 1508214354,
"end": 1508214354,
"sensors": [
"time",
"pm",
"tmp",
"hum",
"co2",
"voc",
"allpollu"
],
"units": [
"s",
"ugm3",
"C",
"pc",
"ppm",
"ppb",
"%"
],
"datapoints": [
[
1508214354,
2.5200195,
23.761,
50.453,
451,
125,
2.5200195
]
]
}
'''
def Setup(firefly, package, **kwargs):
logging.message('Entering %s setup' % TITLE)
foobot = Foobot(firefly, package, **kwargs)
firefly.install_component(foobot)
logging.info('Finished Installing Foobot')
return foobot.id
class Foobot(Device):
def __init__(self, firefly, package, **kwargs):
initial_values = kwargs.get('initial_values', {})
INITIAL_VALUES.update(initial_values)
kwargs['initial_values'] = INITIAL_VALUES
super().__init__(firefly, package, TITLE, AUTHOR, COMMANDS, REQUESTS, DEVICE_TYPE, **kwargs)
# ff_id will be the uuid of the device
self.device = kwargs.get('foobot_device')
self.api_key = kwargs.get('api_key')
self.username = kwargs.get('username')
self.refresh = kwargs.get('refresh', 15)
self.add_request('air_quality', self.get_air_quality)
self.add_request(TEMPERATURE, self.get_temperature)
self.add_request(HUMIDITY, self.get_humidity)
self.add_request('pm', self.get_pm)
self.add_request('c02', self.get_c02)
self.add_request('voc', self.get_voc)
    self.add_request('allpollu', self.get_allpollu)
self.add_command('set_temp_scale', self.set_scale)
text_mapping = {
'Great': ['great'],
'Good': ['good'],
'Fair': ['fair'],
'Poor': ['poor'],
'No Reading': ['unknown']
}
color_mapping = ColorMap(green=['great'], orange=['good'], yellow=['fair'], red=['poor'], black=['unknown'])
self.add_action('air_quality', action_text(primary=True, title='Air Quality', context='Calculated Air Quality', request='air_quality', text_mapping=text_mapping, color_mapping=color_mapping))
self.add_action(TEMPERATURE, action_text(primary=False, title='Temperature', context='Last reported temperature', request=TEMPERATURE, units=self._temp_scale))
self.add_action(HUMIDITY, action_text(primary=False, title='Humidity', context='Last reported humidity', request=HUMIDITY, units='%'))
#self.update()
#TODO: Add temp reporting
self._alexa_export = False
scheduler.runInS(10, self.update)
scheduler.runEveryM(self.refresh, self.update, job_id=self.id)
def export(self, current_values: bool = True, api_view: bool = False) -> dict:
export_data = super().export(current_values, api_view)
if not api_view:
export_data.update({
'foobot_device': self.device,
'api_key': self.api_key,
'username': self.username
})
return export_data
def set_scale(self, **kwargs):
scale = kwargs.get('scale', 'f')
if scale == 'f' or scale == 'c':
self._temp_scale = scale
def get_air_quality(self, **kwargs):
score = max([self.voc_score(), self.c02_score(), self.pm_score()])
self._air_quality = SCORE_MAP[score]
return self._air_quality
def get_pm(self, **kwargs):
return self._pm
def get_temperature(self, **kwargs):
if self._temp_scale == 'c':
return self._temperature
return 9.0 / 5.0 * float(self._temperature) + 32
def get_humidity(self, **kwargs):
return self._humidity
def get_c02(self, **kwargs):
    return self._c02
def get_voc(self, **kwargs):
return self._voc
def get_allpollu(self, **kwargs):
return self._allpollu
def update(self, *args, **kwargs):
url = STATUS_URL % str(self.id)
headers = {
'X-API-KEY-TOKEN': self.api_key,
}
r = requests.get(url, headers=headers)
    if r.status_code != 200:
      logging.message('[FOOBOT] Error refreshing: %s' % r.text)
      return
data = r.json()
logging.info('[FOOBOT] data: %s' % str(data))
datapoints = data.get('datapoints')
if not datapoints:
return
datapoints = datapoints[0]
if len(datapoints) < 7:
return
self.store_before_state()
self._pm = datapoints[1]
self._temperature = datapoints[2]
self._humidity = datapoints[3]
self._c02 = datapoints[4]
self._voc = datapoints[5]
self._allpollu = datapoints[6]
self._last_command_source = 'Foobot Refresh'
self._last_update_time = self.firefly.location.now
self.broadcast_change()
def pm_score(self, **kwargs):
if self._pm == -1:
return 100
if self._pm <= 12.5:
return 0
if self._pm <= 25:
return 1
if self._pm <= 37.5:
return 2
return 3
def voc_score(self, **kwargs):
if self._voc == -1:
return 100
if self._voc <= 150:
return 0
if self._voc <= 300:
return 1
if self._voc <= 450:
return 2
return 3
def c02_score(self, **kwargs):
if self._c02 == -1:
return 100
if self._c02 <= 625:
return 0
if self._c02 <= 1300:
return 1
if self._c02 <= 1925:
return 2
return 3
|
Firefly-Automation/Firefly
|
Firefly/components/foobot/foobot.py
|
Python
|
apache-2.0
| 5,962
|
[
"Firefly"
] |
39b837de6518b854f950a2942231202720fa31298d0b05b4d70925ebe45794d3
|
#!/usr/bin/env python
#
# Copyright 2001 - 2006 Ludek Smid [http://www.ospace.net/]
#
# This file is part of IGE - Outer Space.
#
# IGE - Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# IGE - Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IGE - Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# tweak PYTHONPATH
import sys, string, re
import os
if not getattr(sys, "frozen", False):
os.chdir("../shared")
sys.path.append("../libs/server")
from igeclient.IClient import IClient
import pprint, traceback
from getpass import getpass
from code import InteractiveConsole
from ige.ospace import Rules
import time
from ige.Config import Config
#not race specific:
levelTechs = {1: [
1000, 1100, 1101, 1102, 1104, 1106, 1107, 1110, 1112,
1400, 1401, 1402, 1403, 1404,
1500, 1510, 1511,
1800, 1801, 1802, 1803,
],
2: [
1105, 1111,
2001, 2006,
2400, 2401, 2403, 2404, 2405, 2406, 2407, 2408, 2409,
2800, 2801, 2802, 2803,
],
3: [
3000, 3010, 3013,
3401, 3402, 3403, 3404, 3405, 3406, 3407, 3409, 3410,
3450, 3451,
3800, 3802,
],
4: [
4000, 4004, 4005,
],
5: [
5000, 5001, 5002,
5800, 5801, 5802, 5803,
],
6: [
6000, 6001, 6005, 6025,
],
99: [
99001, 99002, 99003, 99004,
]}
#race specific:
levelTechsRaces = {
1: {'B': [], 'H': [], 'C': []},
2: {
'B': [2003, 2005, 2007, 2804, 2805],
'H': [2000, 2004],
'C': [2002, 2004]},
3: {
'B': [3001, 3003, 3007, 3008, 3412, 3420, 3421, 3452, 3454, 3803, ],
'H': [3002, 3004, 3006, 3009, 3408, 3411, 3453, 3455, 3803, ],
'C': [3001, 3005, 3006, 3411, 3453, 3456, ]},
4: {
'B': [4003, 4400, 4401, 4402, 4403, 4404, 4405, 4406, 4458, 4460, 4476, 4502, 4504, ],
'H': [4002, 4009, 4010, 4407, 4408, 4409, 4410, 4411, 4412, 4413, 4459, 4461, 4477, 4479, 4480, 4500, 4503, ],
'C': [4001, 4414, 4415, 4416, 4417, 4418, 4419, 4420, 4459, 4462, 4477, 4479, 4501, 4503, ]},
5: {
'B': [5400, 5401, 5402, 5403, 5404, 5405, 5406, 5007, 5431, 5433, 5465, 5467, 5470, 5475, 5503, 5504, 5507, 5805, 5808],
'H': [5003, 5004, 5005, 5008, 5408, 5409, 5410, 5411, 5412, 5413, 5414, 5430, 5434, 5466, 5468, 5471, 5474, 5501, 5502, 5508, 5804, 5807],
'C': [5006, 5416, 5417, 5418, 5419, 5420, 5421, 5432, 5435, 5466, 5469, 5472, 5473, 5476, 5505, 5506, 5806]},
6: {
'B': [6200, 6201, 6202, 6203, 6204, 6205, 6220, 6221, 6222, 6241, ],
'H': [6100, 6101, 6102, 6103, 6104, 6105, 6120, 6121, 6140, 6141, 6160, ],
'C': [6301, 6302, 6303, 6304, 6305, 6306, 6320, 6321, 6322, 6323, 6340, 6360, ]
},
99: {'B': [], 'H': [], 'C': []},
}
advTechLevel = {
1: {},
2: {"B" : 1990, "H" : 1991, "C" : 1992},
3: {"B" : 2990, "H" : 2991, "C" : 2992},
4: {"B" : 3990, "H" : 3991, "C" : 3992},
5: {"B" : 4990, "H" : 4991, "C" : 4992},
6: {"B" : 5990, "H" : 5991, "C" : 5992},
99: {}
}
def cleanupBadFleets():
un = s.getInfo(1)
delete = []
#search by system rather than by player; there is no galaxy list of playerIDs
for galaxyID in un.galaxies:
galaxy = s.getInfo(galaxyID)
for systemID in galaxy.systems:
system = s.getInfo(systemID)
for fleetID in system.fleets:
fleet = s.getInfo(fleetID)
owner = s.getInfo(fleet.owner)
                if not owner.galaxies:
                    delete.append((fleetID,systemID,fleet.owner,0))
                elif owner.galaxies[0] != galaxyID:
                    delete.append((fleetID,systemID,fleet.owner,1))
for row in delete:
if row[3]:
print "Deleting",row[0],"- owner not in fleet's galaxy"
else:
print "Deleting",row[0],"- owner not in a galaxy"
s.disbandFleet(row[0])
return
def msgHandler(id, data):
if id >= 0:
print 'Message', id, data
def getPlayer(name):
u = s.getInfo(1)
for playerID in u.players:
pl = s.getInfo(playerID)
if pl.name == name:
return pl
return None
def showPlayers():
un = s.getInfo(1)
players = []
for playerID in un.players:
player = s.getInfo(playerID)
players.append((playerID, player.name))
print
print
print "List of current players:"
for pl in players:
print "%5d: %s" % pl
print
print "Press Enter to continue"
raw_input()
def showGalaxies():
un = s.getInfo(1)
galaxies = []
for galaxyID in un.galaxies:
galaxy = s.getInfo(galaxyID)
galaxies.append((galaxyID, galaxy.name))
print
print
print "List of current galaxies:"
for gal in galaxies:
print "%5d: %s" % gal
print
def setCurrentObject():
objId = raw_input("oid: ")
newObjID = 0
try:
newObjID = int(objId)
except:
print "Invalid object"
return newObjID
def initDevelTesting(objID):
levels = 6
resources = 8
race = string.upper(raw_input("race: "))
    if race not in ('B', 'C', 'H'):
print "Invalid race"
return objID
for level in range(2,levels+1):
advanceLevelRace(objID,level,race)
for level in range(1,levels+1):
giveTechsNum(objID,level)
giveTechsNum(objID,99)
for stratResID in range(1,resources+1):
giveStratResNum(objID,stratResID,50)
return objID
def giveTechs(objID):
lvl = raw_input("level: ")
level = 0
try:
level = int(lvl)
except:
print "Invalid level"
        return objID
    if level > 6 and level != 99:
        print "Invalid level"
        return objID
giveTechsNum(objID,level)
def giveTechsNum(objID,level):
player = s.getInfo(objID)
plTechs = player.techs
for techId in levelTechs[level]:
plTechs[techId] = 5
if len(player.race) > 0:
print "setting race dependent techs"
for techId in levelTechsRaces[level][player.race]:
plTechs[techId] = 5
s.set(objID, "techs", plTechs)
print "Techs at level %d added to player %d." % (level, objID)
return objID
def giveTech(objID):
tid = raw_input("techId: ")
try:
techId = int(tid)
except:
print "Invalid techId"
return objID
player = s.getInfo(objID)
plTechs = player.techs
try:
plTechs[techId] = 5
except:
print "Invalid techId"
return objID
s.set(objID, "techs", plTechs)
print "Tech %d added to player %d." % (techId, objID)
return objID
def advanceLevel(objID):
lvl = raw_input("level: ")
try:
level = int(lvl)
except:
print "Invalid level"
return objID
if level > 6 or level < 2:
print "Invalid level"
return objID
race = string.upper(raw_input("race: "))
    if race not in ('B', 'C', 'H'):
        print "Invalid race"
        return objID
    advanceLevelRace(objID, level, race)
def advanceLevelRace(objID,level,race):
player = s.getInfo(objID)
plTechs = player.techs
plTechs[advTechLevel[level][race]] = 5
s.set(objID, "techs", plTechs)
s.set(objID, "techLevel", level)
s.set(objID, "race", race)
print "Tech %d added, techLevel advance to %d to player %d." % (advTechLevel[level][race], level, objID)
return objID
def promoteToImperator(objID):
galID = raw_input("galaxy id: ")
try:
galaxyID = int(galID)
except:
print "Invalid galaxy id"
return objID
s.set(objID, "imperator", 3)
s.set(galaxyID, "imperator", objID)
print "Galaxy %d has now imperator %d." % (galaxyID, objID)
return objID
def giveFame(objID):
numFame = raw_input("Amount of Fame: ")
try:
numberFame = int(numFame)
except:
print "Not a number"
return objID
player = s.getInfo(objID)
if not hasattr(player,'pirateFame'):
print "Object is not a pirate"
return objID
newFame = player.pirateFame + numberFame
s.set(objID, "pirateFame", newFame)
print "Player %d now has %d fame" % (objID, newFame)
def giveStratRes(objID):
resID = raw_input("strategy resource ('a' for all resources): ")
if not (resID == 'a'):
try:
stratResID = int(resID)
except:
print "Invalid strategy resource"
return objID
qty = raw_input("qty: ")
try:
quantity = int(qty)
except:
print "Invalid quantity"
return objID
if (resID == 'a'):
for stratResID in range(1,9):
giveStratResNum(objID,stratResID,quantity)
else:
giveStratResNum(objID,stratResID,quantity)
return objID
def giveStratResNum(objID,stratResID,quantity):
plQty = 0
player = s.getInfo(objID)
if stratResID in player.stratRes:
plQty = player.stratRes[stratResID]
stratRes = player.stratRes
stratRes[stratResID] = plQty + quantity
s.set(objID, "stratRes", stratRes)
print "Player %d has now %d pieces of %d." % (objID, stratRes[stratResID], stratResID)
def createGalaxy():
universe = 1
print "Creating new galaxy...please specify these parameters. Normal galaxy positions are multiples of 100."
name = raw_input("Galaxy Name: ")
xpos = raw_input("X Position: ")
ypos = raw_input("Y Position: ")
try:
xpos = int(xpos)
ypos = int(ypos)
except:
print "Positions not integers."
return
s.createNewGalaxy(universe, xpos, ypos, name)
def startGalaxy():
showGalaxies()
objId = raw_input("oid: ")
newObjID = 0
    try:
        newObjID = int(objId)
    except:
        print "Invalid object"
        return
    un = s.getInfo(1)
    galaxyObj = 0
    for galaxyID in un.galaxies:
        if galaxyID == newObjID:
            galaxyObj = newObjID
if galaxyObj == 0:
print "Not a galaxy"
else:
s.enableTime(galaxyObj,1)
print "Galaxy will start on next turn process"
def deleteGalaxy():
showGalaxies()
print "Choose a galaxy to delete."
objId = raw_input("oid: ")
newObjID = 0
    try:
        newObjID = int(objId)
    except:
        print "Invalid object"
        return
    un = s.getInfo(1)
    galaxyObjID = 0
    for galaxyID in un.galaxies:
        if galaxyID == newObjID:
            galaxyObjID = newObjID
if galaxyObjID == 0:
print "Not a galaxy"
else:
galaxy = s.getInfo(galaxyObjID)
print "Please confirm that you want to delete", galaxy.name
ok = raw_input("Y/N: ");
if string.upper(ok) == "Y":
s.delete(galaxyObjID)
print "Galaxy deleted"
def showObj(objID):
try:
obj = s.getInfo(objID)
objstr = repr(obj)
        # split repr(obj) on commas while keeping {...} and [...] groups together
objarr = re.findall("[^,\{\]]*(?:\{[^\}]*[\}\{]),?|[^,\{\]]*(?:\[[^\]]*[\]\[]),?|[^,\}\]]+,?",objstr)
for line in objarr:
print line
except:
print "Cannot get object",objID
def showMenu(objID):
print
print "----- OSpace admin console menu -----"
print "Current object: %s" % objID
print
print "1. Set current object 10. Create Galaxy"
print "2. Show Players 11. Start Galaxy Time (does not need Obj set)"
print "3. Show Galaxies 12. Delete Galaxy (does not need Obj set)"
print "4. Advance to level 13. Init Developer race (all techs, 50 each strat res)"
print "5. Make imperator 14. Give Fame to a Pirate Player"
print "6. Give particular tech "
print "7. Give techs "
print "8. Give Strat Res "
print "9. Finish prod queue "
print
print "T. Process turn R. Process X turns"
print "C. Interactive console I. Object Info"
print
print "Ctrl+Z to End"
print
def processTurns():
numT = raw_input("Number of turns: ")
try:
num = int(numT)
    except:
        print "invalid number of turns"
        return
    for i in range(1, num + 1):
s.processTurn()
def finishProdQueue(objId):
p = s.get(objId)
for i in p.prodQueue:
i.currProd = 38400
s.set(p.oid, "prodQueue", p.prodQueue)
def processMenu(inp, objId, s):
if inp == "2":
showPlayers()
elif inp == "1":
return setCurrentObject()
elif inp == "7":
giveTechs(objId)
elif inp == "6":
giveTech(objId)
elif inp == "4":
advanceLevel(objId)
elif inp == "3":
showGalaxies()
elif inp == "5":
promoteToImperator(objId)
elif inp == "8":
giveStratRes(objId)
elif inp == "9":
finishProdQueue(objId)
elif inp == "10":
createGalaxy()
elif inp == "11":
startGalaxy()
elif inp == "12":
deleteGalaxy()
elif inp == "13":
initDevelTesting(objID)
elif inp == "14":
giveFame(objID)
elif string.upper(inp) == "I":
showObj(objID)
elif string.upper(inp) == "R":
processTurns()
elif string.upper(inp) == "T":
s.processTurn()
elif string.upper(inp) == "C":
console = InteractiveConsole(locals())
console.interact()
elif string.upper(inp) == "CLEANUPFLEETS":
console = cleanupBadFleets()
return objId
config = Config("var/config.ini")
#s = IClient('ospace.net:9080', None, msgHandler, None, 'IClient/osc')
s = IClient('localhost:9080', None, msgHandler, None, 'IClient/osc')
if len(sys.argv) != 2:
print "Usage: osclient LOGIN"
sys.exit(1)
login = sys.argv[1]
if login == "admin":
    try:
        password = open(os.path.join("var", "token"), "r").read()
    except IOError:
        password = getpass("Password: ")
else:
    password = getpass("Password: ")
s.connect(login)
s.login(config.server.name, login, password)
try:
objID = 0
while True:
showMenu(objID)
objID = processMenu(raw_input(), objID, s)
except EOFError:
pass
s.logout()
|
OuterDeepSpace/OuterDeepSpace
|
server/osclient.py
|
Python
|
gpl-2.0
| 13,338
|
[
"Galaxy"
] |
e9801fde16de86e0c373105ae78e6194d49a6af6563e9fa779729b2a4d8d773b
|
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from .. import Rule
#-------------------------------------------------------------------------
#
# SearchName
#
#-------------------------------------------------------------------------
class SearchName(Rule):
"""Rule that checks for full or partial name matches"""
labels = [_('Substring:')]
name = _('People matching the <name>')
description = _("Matches people with a specified (partial) name")
category = _('General filters')
def apply(self, db, person):
src = self.list[0].upper()
if not src:
return False
for name in [person.get_primary_name()] + person.get_alternate_names():
for field in [name.first_name, name.get_surname(), name.suffix,
name.title, name.nick, name.famnick, name.call]:
if src and field.upper().find(src) != -1:
return True
return False
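# Minimal usage sketch (hypothetical objects; the surrounding filter framework
# normally instantiates rules from a saved filter definition):
#
#     rule = SearchName(['smith'])
#     matches = [p for p in people if rule.apply(db, p)]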
|
sam-m888/gprime
|
gprime/filters/rules/person/_searchname.py
|
Python
|
gpl-2.0
| 2,194
|
[
"Brian"
] |
ba11762457605ac30ce1882493fe864cf319907da8020e8a116ab54ad6578bd2
|
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools.converters.mil.mil import types
class DotVisitor(object):
"""
Generates a dot description of a graph in dictionary form.
"""
def __init__(self, annotation=None):
self.result = []
self.visited_memo = {}
self.highlights = {}
self.alternate_labeller = None
self.annotation = annotation
def labeller(self, labeller):
self.alternate_labeller = labeller
return self
def highlight_nodes(self, nodeset, color="yellow"):
for i in nodeset:
self.highlights[i] = color
return self
def visit(self, graph, node, nodename_prefix=""):
if node.name in self.visited_memo:
return self
        # Determine the datatype to display; a symbolic datatype, if present,
        # takes precedence over the concrete one.
if node.attr.get("symbolic_datatype", None) is not None:
dtype = str(types.get_type_info(node.attr["symbolic_datatype"]))
elif node.datatype is not None:
dtype = str(types.get_type_info(node.datatype))
else:
dtype = "Unknown"
label = ""
if self.alternate_labeller is not None:
label = self.alternate_labeller(node)
else:
if len(node.outputs) == 0:
label = "\\n{" + node.name + "}"
if "Placeholder" in node.op:
label = "\\n{" + node.name + "}"
if node.op == "while":
label = (
"\\n{body: "
+ node.attr["body_function"]
+ " cond:"
+ node.attr["cond_function"]
+ "}"
)
if node.op == "function":
label = "\\n{body: " + node.attr["function_name"] + "}"
if node.op == "function_entry":
label = "\\n{" + node.name + "}"
label = node.op + ":" + dtype + label
if node.name in self.highlights:
self.result.append(
'"'
+ nodename_prefix
+ node.name
+ '"'
+ '[label="'
+ label
+ '",fillcolor=%s,style=filled,fontcolor=%s]'
% (
self.highlights[node.name],
"violetred" if node.attr.get(self.annotation, False) else "black",
)
)
else:
self.result.append(
'"'
+ nodename_prefix
+ node.name
+ '"'
+ '[label="'
+ label
+ '",fontcolor=%s]'
% ("violetred" if node.attr.get(self.annotation, False) else "black")
)
for i in node.inputs:
input_name = i
edge = (
'"'
+ nodename_prefix
+ input_name
+ '"'
+ " -> "
+ '"'
+ nodename_prefix
+ node.name
+ '"'
)
self.result.append(edge)
for i in node.control_inputs:
input_name = i
edge = (
'"'
+ nodename_prefix
+ input_name
+ '"'
+ " -> "
+ '"'
+ nodename_prefix
+ node.name
+ '"'
)
edge = edge + " [style=dotted]"
self.result.append(edge)
self.visited_memo[node.name] = 1
for i in node.inputs:
input_name = i
if input_name[0] == "^":
input_name = input_name[1:]
assert input_name in graph
self.visit(graph, graph[input_name], nodename_prefix)
return self
def visit_all(self, graph, nodename_prefix=""):
for i in graph:
self.visit(graph, graph[i], nodename_prefix)
return self
def get_result(self, graphtype="digraph", graph_name="g"):
return (
graphtype
+ " "
+ graph_name
+ " {\n\t"
+ "\n\t".join(str(i) for i in self.result)
+ ';\n\tlabel="'
+ graph_name[8:]
+ '";\n\tfontsize=96;\n}'
)
def __str__(self):
return self.get_result()
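# Illustrative usage sketch (hypothetical graph; visit_all expects a dict
# mapping node names to node objects):
#
#     dot_source = (DotVisitor()
#                   .highlight_nodes(["main/output"], color="lightblue")
#                   .visit_all(graph)
#                   .get_result("digraph", "g"))
#     with open("graph.dot", "w") as f:
#         f.write(dot_source)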
|
apple/coremltools
|
coremltools/converters/mil/frontend/tensorflow/dot_visitor.py
|
Python
|
bsd-3-clause
| 4,552
|
[
"VisIt"
] |
09003ede3951ed4f07ea7082a3a8998ce3f15528806edf8bfa8d879f144ec66f
|
"""
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/sites/default/files/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely,
if alpha is chosen too large, the Lasso is equivalent to stepwise
regression, and thus brings no advantage over a univariate
F-test.
Second, we fix alpha and compare the performance of different feature
selection methods, using the area under the curve (AUC) of the
precision-recall curve.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.exceptions import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
linalg.pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
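# Concretely (hypothetical shapes): with X_relevant of shape (n, k) and
# X_irelevant of shape (n, p - k), the projector has shape (p - k, k), and
# mutual_incoherence is the largest L1 norm of its rows; values below 1
# roughly correspond to designs on which L1 recovery can succeed.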
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
    # We plot the path as a function of alpha/alpha_max to the power 1/3: the
    # power 1/3 scales the path less brutally than the log, and makes it
    # easier to see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
    # Suppress the user warning outputs - they are not necessary for the
    # example, as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
    # Run the RandomizedLasso: we use a path of alphas going down to
    # .1*alpha_max to avoid exploring the regime in which very noisy variables
    # enter the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
|
Titan-C/scikit-learn
|
examples/linear_model/plot_sparse_recovery.py
|
Python
|
bsd-3-clause
| 7,453
|
[
"Gaussian"
] |
18e3c3e52a55eb128843375546208610544ccab641b5fad89d6e2e9bd1da6d7d
|
#!/usr/bin/env python
'''
Created on Jan 5, 2011
@author: mkiyer
chimerascan: chimeric transcript discovery using RNA-seq
Copyright (C) 2011 Matthew Iyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import collections
import logging
import os
import shutil
import subprocess
import sys
from optparse import OptionParser
# local imports
import chimerascan.pysam as pysam
from chimerascan.lib.feature import GeneFeature
from chimerascan.lib.seq import DNA_reverse_complement
from chimerascan.lib.base import up_to_date, check_executable
from chimerascan.bx.intersection import Interval, IntervalTree
from chimerascan.lib.config import JOB_ERROR, JOB_SUCCESS, ALIGN_INDEX, \
BOWTIE_INDEX_FILE, FRAG_SIZE_INDEX, FRAG_SIZE_INDEX_FILE, \
GENE_FEATURE_FILE, GENE_REF_PREFIX, RAW_JUNCS_FILE
BASES_PER_LINE = 50
def split_seq(seq, chars_per_line):
pos = 0
newseq = []
while pos < len(seq):
if pos + chars_per_line > len(seq):
endpos = len(seq)
else:
endpos = pos + chars_per_line
newseq.append(seq[pos:endpos])
pos = endpos
return '\n'.join(newseq)
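# For example (hypothetical input): split_seq('ACGTACGT', 3) returns
# 'ACG\nTAC\nGT'.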
def bed12_to_fasta(gene_feature_file, reference_seq_file):
ref_fa = pysam.Fastafile(reference_seq_file)
for g in GeneFeature.parse(open(gene_feature_file)):
exon_seqs = []
error_occurred = False
for start, end in g.exons:
seq = ref_fa.fetch(g.chrom, start, end)
if not seq:
logging.warning("gene %s exon %s:%d-%d not found in reference" %
(g.tx_name, g.chrom, start, end))
error_occurred = True
break
exon_seqs.append(seq)
if error_occurred:
continue
# make fasta record
seq = ''.join(exon_seqs)
if g.strand == '-':
seq = DNA_reverse_complement(seq)
# break seq onto multiple lines
seqlines = split_seq(seq, BASES_PER_LINE)
yield (">%s range=%s:%d-%d gene=%s strand=%s\n%s" %
(GENE_REF_PREFIX + g.tx_name, g.chrom, start, end, g.gene_name, g.strand, seqlines))
ref_fa.close()
def build_exon_trees(genes):
trees = collections.defaultdict(lambda: IntervalTree())
for g in genes:
for e in g.exons:
start, end = e
trees[g.chrom].insert_interval(Interval(start, end, strand=g.strand))
return trees
def find_unambiguous_exon_intervals(genes):
"""
returns (chrom, start, end, strand) tuples for exon
intervals that are unique and have no overlapping
transcripts or exons.
"""
trees = build_exon_trees(genes)
for g in genes:
for start,end in g.exons:
hits = [(hit.start, hit.end, hit.strand)
for hit in trees[g.chrom].find(start, end)]
overlapping_hits = set([(start, end, g.strand)]).union(hits)
if len(overlapping_hits) == 1:
yield g.chrom, start, end, g.strand
def create_fragment_size_index(output_dir, gene_feature_file,
reference_seq_file, bowtie_build_bin,
max_fragment_size):
"""
make an alignment index containing sequences that can be used to
assess the fragment size distribution. these sequences must be
larger than the 'max_insert_size' in order to be viable for use
in characterizing the fragment size distribution.
"""
# parse genes file
genes = [g for g in GeneFeature.parse(open(gene_feature_file))]
# find all exons that are larger than the maximum estimated fragment size
exons = set([coord for coord in find_unambiguous_exon_intervals(genes)
if (coord[2] - coord[1]) >= max_fragment_size])
logging.info("Found %d exons larger than %d" % (len(exons), max_fragment_size))
# extract the nucleotide sequence of the exons
logging.info("Extracting sequences to use for estimating the fragment "
" size distribution")
ref_fa = pysam.Fastafile(reference_seq_file)
frag_size_fa_file = os.path.join(output_dir, "frag_size_seq.fa")
fh = open(frag_size_fa_file, 'w')
for chrom, start, end, strand in exons:
seq = ref_fa.fetch(chrom, start, end)
if not seq:
logging.warning("exon %s:%d-%d not found in reference" % (chrom, start, end))
continue
# make fasta record
if strand == '-':
seq = DNA_reverse_complement(seq)
# break seq onto multiple lines
seqlines = split_seq(seq, BASES_PER_LINE)
record = (">%s:%d-%d strand=%s\n%s" %
(chrom, start, end, strand, seqlines))
print >>fh, record
fh.close()
ref_fa.close()
# build bowtie alignment index from the fragment size exons
logging.info("Building bowtie index")
frag_size_index = os.path.join(output_dir, FRAG_SIZE_INDEX)
args = [bowtie_build_bin, frag_size_fa_file, frag_size_index]
return subprocess.call(args)
def create_tophat_juncs_file(output_dir, gene_feature_file):
"""
adapted from the 'bed_to_juncs' script distributed with the
TopHat package. http://tophat.cbcb.umd.edu
"""
line_num = 0
for line in open(gene_feature_file):
line = line.strip()
if line.startswith("#"):
continue
fields = line.split()
if len(fields) < 10:
logging.warning("Malformed line %d, missing columns" % (line_num))
continue
line_num += 1
chrom = fields[1]
strand = fields[2]
tx_start = int(fields[3])
#tx_end = int(fields[4])
exon_starts = map(int, fields[8].split(",")[:-1])
exon_ends = map(int, fields[9].split(",")[:-1])
for i in xrange(1,len(exon_starts)):
junc_start = tx_start + exon_ends[i-1] - 1
junc_end = tx_start + exon_starts[i]
yield "%s\t%d\t%d\t%s" % (chrom, junc_start, junc_end, strand)
def create_chimerascan_index(output_dir,
genome_fasta_file,
gene_feature_file,
bowtie_build_bin):
# min_fragment_size,
# max_fragment_size):
# create output dir if it does not exist
if not os.path.exists(output_dir):
os.makedirs(output_dir)
logging.info("Created index directory: %s" % (output_dir))
# copy reference fasta file to output dir
index_fasta_file = os.path.join(output_dir, ALIGN_INDEX + ".fa")
if (up_to_date(index_fasta_file, genome_fasta_file) and
up_to_date(index_fasta_file, gene_feature_file)):
logging.info("[SKIPPED] Adding reference genome to index")
else:
logging.info("Adding reference genome to index")
shutil.copyfile(genome_fasta_file, index_fasta_file)
# index the genome fasta file
logging.info("Indexing FASTA file")
fh = pysam.Fastafile(index_fasta_file)
fh.close()
# append sequences from gene feature file
logging.info("Adding transcript sequences to index...")
fh = open(index_fasta_file, "a")
for fa_record in bed12_to_fasta(gene_feature_file,
index_fasta_file):
print >>fh, fa_record
fh.close()
# remove old fasta index
os.remove(index_fasta_file + ".fai")
# re-index the combined fasta file
logging.info("Re-indexing FASTA file...")
fh = pysam.Fastafile(index_fasta_file)
fh.close()
# build bowtie index on the reference sequence file
bowtie_index_file = os.path.join(output_dir, BOWTIE_INDEX_FILE)
msg = "Building bowtie index"
if up_to_date(bowtie_index_file, index_fasta_file):
logging.info("[SKIPPED] %s" % (msg))
else:
logging.info(msg)
bowtie_index_name = os.path.join(output_dir, ALIGN_INDEX)
args = [bowtie_build_bin, index_fasta_file, bowtie_index_name]
if subprocess.call(args) != os.EX_OK:
logging.error("bowtie-build failed to create alignment index")
if os.path.exists(bowtie_index_file):
os.remove(bowtie_index_file)
return JOB_ERROR
# copy gene bed file to index directory
dst_gene_feature_file = os.path.join(output_dir, GENE_FEATURE_FILE)
if up_to_date(dst_gene_feature_file, gene_feature_file):
logging.info("[SKIPPED] Adding transcript features to index...")
else:
logging.info("Adding transcript features to index...")
shutil.copyfile(gene_feature_file, dst_gene_feature_file)
# create tophat junctions file from gene features
# juncs_file = os.path.join(output_dir, TOPHAT_JUNCS_FILE)
# if up_to_date(juncs_file, dst_gene_feature_file):
# logging.info("[SKIPPED] Creating splice junction file...")
# else:
# logging.info("Creating splice junction file...")
# fh = open(juncs_file, "w")
# for junc_line in create_tophat_juncs_file(output_dir, gene_feature_file):
# print >>fh, junc_line
# fh.close()
# build special index used to discover the fragment size
# frag_size_index_file = os.path.join(output_dir, FRAG_SIZE_INDEX_FILE)
# if up_to_date(frag_size_index_file, index_fasta_file):
# logging.info("[SKIPPED] Building fragment size distribution index")
# else:
# logging.info("Building fragment size distribution index")
# retcode = create_fragment_size_index(output_dir, gene_feature_file,
# genome_fasta_file,
# bowtie_build_bin,
# max_fragment_size)
# if retcode != os.EX_OK:
# logging.error("bowtie-build failed to create fragment size "
# "distribution index")
# if os.path.exists(frag_size_index_file):
# os.remove(frag_size_index_file)
# return JOB_ERROR
logging.info("chimerascan index created successfully")
return JOB_SUCCESS
def main():
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
parser = OptionParser("usage: %prog [options] <reference_genome.fa> "
"<gene_models.txt> <index_output_dir>")
#parser.add_option('-i', '--min-fragment-size', dest="min_fragment_size", default=0)
#parser.add_option('-I', '--max-fragment-size', dest="max_fragment_size", default=700)
parser.add_option("--bowtie-dir", dest="bowtie_dir", default="",
help="Path to the 'bowtie' software (by default, "
"expects the 'bowtie' and 'bowtie-build' "
"binaries to be in current PATH)")
options, args = parser.parse_args()
# check command line arguments
if len(args) < 3:
parser.error("Incorrect number of command line arguments")
ref_fasta_file = args[0]
gene_feature_file = args[1]
output_dir = args[2]
# check that input files exist
if not os.path.isfile(ref_fasta_file):
parser.error("Reference fasta file '%s' not found" % (ref_fasta_file))
if not os.path.isfile(gene_feature_file):
parser.error("Gene feature file '%s' not found" % (gene_feature_file))
# check that output dir is not a regular file
if os.path.exists(output_dir) and (not os.path.isdir(output_dir)):
parser.error("Output directory name '%s' exists and is not a valid "
"directory" % (output_dir))
# check that bowtie-build program exists
bowtie_build_bin = os.path.join(options.bowtie_dir, "bowtie-build")
if check_executable(bowtie_build_bin):
logging.debug("Checking for 'bowtie-build' binary... found")
else:
parser.error("bowtie-build binary not found or not executable")
# run main index creation function
retcode = create_chimerascan_index(output_dir, ref_fasta_file,
gene_feature_file,
bowtie_build_bin)
# min_fragment_size=options.min_fragment_size,
# max_fragment_size=options.max_fragment_size)
sys.exit(retcode)
if __name__ == '__main__':
main()
|
tectronics/chimerascan
|
chimerascan/deprecated/chimerascan_index_v2.py
|
Python
|
gpl-3.0
| 13,041
|
[
"Bowtie",
"pysam"
] |
a0ec567e1fb9f7a686db864928689899bc527f69407b599a340e2f9d9ca69e91
|
#!/usr/bin/env python
#JSON {"lot": "UHF/3-21G",
#JSON "scf": "PlainSCFSolver",
#JSON "er": "dense",
#JSON "difficulty": 1,
#JSON "description": "Basic UHF example with dense matrices"}
from horton import * # pylint: disable=wildcard-import,unused-wildcard-import
# Load the coordinates from file.
# Use the XYZ file from HORTON's test data directory.
fn_xyz = context.get_fn('test/methyl.xyz')
mol = IOData.from_file(fn_xyz)
# Create a Gaussian basis set
obasis = get_gobasis(mol.coordinates, mol.numbers, '3-21G')
# Compute Gaussian integrals
olp = obasis.compute_overlap()
kin = obasis.compute_kinetic()
na = obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers)
er_vecs = obasis.compute_electron_repulsion()
# Create alpha orbitals
orb_alpha = Orbitals(obasis.nbasis)
orb_beta = Orbitals(obasis.nbasis)
# Initial guess
guess_core_hamiltonian(olp, kin + na, orb_alpha, orb_beta)
# Construct the unrestricted HF effective Hamiltonian
external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
terms = [
UTwoIndexTerm(kin, 'kin'),
UDirectTerm(er_vecs, 'hartree'),
UExchangeTerm(er_vecs, 'x_hf'),
UTwoIndexTerm(na, 'ne'),
]
ham = UEffHam(terms, external)
# Decide how to occupy the orbitals (5 alpha electrons, 4 beta electrons)
occ_model = AufbauOccModel(5, 4)
# Converge WFN with plain SCF
scf_solver = PlainSCFSolver(1e-6)
scf_solver(ham, olp, occ_model, orb_alpha, orb_beta)
# Assign results to the molecule object and write it to a file, e.g. for
# later analysis
mol.title = 'UHF computation on methyl'
mol.energy = ham.cache['energy']
mol.obasis = obasis
mol.orb_alpha = orb_alpha
mol.orb_beta = orb_beta
# useful for visualization:
mol.to_file('methyl.molden')
# useful for post-processing (results stored in double precision)
mol.to_file('methyl.h5')
# CODE BELOW IS FOR horton-regression-test.py ONLY. IT IS NOT PART OF THE EXAMPLE.
rt_results = {
'energy': ham.cache['energy'],
'orb_alpha': orb_alpha.energies,
'orb_beta': orb_beta.energies,
'nn': ham.cache["energy_nn"],
'kin': ham.cache["energy_kin"],
'ne': ham.cache["energy_ne"],
'ex': ham.cache["energy_x_hf"],
'hartree': ham.cache["energy_hartree"],
}
# BEGIN AUTOGENERATED CODE. DO NOT CHANGE MANUALLY.
import numpy as np # pylint: disable=wrong-import-position
rt_previous = {
'energy': -39.331221904962412,
'ex': -6.113904009056378,
'orb_alpha': np.array([
-11.194977911345202, -0.92420112228138784, -0.55513937861886831,
-0.55513936656337781, -0.38934656780805416, 0.2535844073284213,
0.33566480311154712, 0.3356648206159526, 0.9332291232904848, 0.98518834644331721,
0.98518849306172951, 1.102449080416404, 1.3032622584429283, 1.3032623363395135,
1.6761192066890211
]),
'orb_beta': np.array([
-11.169031571491915, -0.81817737275326963, -0.53903034297663222,
-0.53903033685265866, 0.16303091192059521, 0.28378927314962432,
0.34897199801702861, 0.34897201610275758, 1.0010276475405682, 1.0010277998402939,
1.0836169709197594, 1.1060903534350119, 1.3066657923992548, 1.3066658801221429,
1.7272407098243059
]),
'hartree': 27.840836401008165,
'kin': 38.93357262027515,
'ne': -109.07151185985299,
'nn': 9.0797849426636361,
}
|
theochem/horton
|
data/examples/hf_dft/uhf_methyl_dense.py
|
Python
|
gpl-3.0
| 3,305
|
[
"Gaussian"
] |
e7267279371564e2d97299497d363c4de6dab177fd3d446c6e2551d9060a28e9
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Peter Eastman, Robert McGibbon
# Contributors: Carlos Hernandez
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
# Portions copyright (c) 2012 Stanford University and the Authors.
#
# Portions of this code originate from the OpenMM molecular simulation
# toolkit. Those portions are Copyright 2008-2012 Stanford University
# and Peter Eastman, and distributed under the following license:
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
from __future__ import print_function, division
import os
from datetime import date
import gzip
import numpy as np
import xml.etree.ElementTree as etree
from copy import copy
from mdtraj.formats.pdb.pdbstructure import PdbStructure
from mdtraj.core.topology import Topology
from mdtraj.utils import ilen, cast_indices, in_units_of
from mdtraj.formats.registry import _FormatRegistry
from mdtraj.core import element as elem
from mdtraj.utils import six
from mdtraj import version
if six.PY3:
from urllib.request import urlopen
from urllib.parse import urlparse
from urllib.parse import (uses_relative, uses_netloc, uses_params)
else:
from urllib2 import urlopen
from urlparse import urlparse
from urlparse import uses_relative, uses_netloc, uses_params
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
_VALID_URLS.discard('')
__all__ = ['load_pdb', 'PDBTrajectoryFile']
##############################################################################
# Code
##############################################################################
def _is_url(url):
"""Check to see if a URL has a valid protocol.
from pandas/io.common.py Copyright 2014 Pandas Developers
Used under the BSD licence
"""
try:
return urlparse(url).scheme in _VALID_URLS
except:
return False
@_FormatRegistry.register_loader('.pdb')
def load_pdb(filename, stride=None, atom_indices=None, frame=None):
"""Load a RCSB Protein Data Bank file from disk.
Parameters
----------
filename : str
Path to the PDB file on disk. The string could be a URL. Valid URL
schemes include http and ftp.
stride : int, default=None
Only read every stride-th model from the file
atom_indices : array_like, optional
If not none, then read only a subset of the atoms coordinates from the
file. These indices are zero-based (not 1 based, as used by the PDB
format). So if you want to load only the first atom in the file, you
would supply ``atom_indices = np.array([0])``.
frame : int, optional
Use this option to load only a single frame from a trajectory on disk.
If frame is None, the default, the entire trajectory will be loaded.
If supplied, ``stride`` will be ignored.
Returns
-------
trajectory : md.Trajectory
The resulting trajectory, as an md.Trajectory object.
Examples
--------
>>> import mdtraj as md
>>> pdb = md.load_pdb('2EQQ.pdb')
>>> print pdb
<mdtraj.Trajectory with 20 frames, 423 atoms at 0x110740a90>
See Also
--------
mdtraj.PDBTrajectoryFile : Low level interface to PDB files
"""
from mdtraj import Trajectory
if not isinstance(filename, six.string_types):
raise TypeError('filename must be of type string for load_pdb. '
'you supplied %s' % type(filename))
atom_indices = cast_indices(atom_indices)
filename = str(filename)
with PDBTrajectoryFile(filename) as f:
atom_slice = slice(None) if atom_indices is None else atom_indices
if frame is not None:
coords = f.positions[[frame], atom_slice, :]
else:
coords = f.positions[::stride, atom_slice, :]
assert coords.ndim == 3, 'internal shape error'
n_frames = len(coords)
topology = f.topology
if atom_indices is not None:
topology = topology.subset(atom_indices)
if f.unitcell_angles is not None and f.unitcell_lengths is not None:
unitcell_lengths = np.array([f.unitcell_lengths] * n_frames)
unitcell_angles = np.array([f.unitcell_angles] * n_frames)
else:
unitcell_lengths = None
unitcell_angles = None
in_units_of(coords, f.distance_unit, Trajectory._distance_unit, inplace=True)
in_units_of(unitcell_lengths, f.distance_unit, Trajectory._distance_unit, inplace=True)
time = np.arange(len(coords))
if frame is not None:
time *= frame
elif stride is not None:
time *= stride
return Trajectory(xyz=coords, time=time, topology=topology,
unitcell_lengths=unitcell_lengths,
unitcell_angles=unitcell_angles)
@_FormatRegistry.register_fileobject('.pdb')
class PDBTrajectoryFile(object):
"""Interface for reading and writing Protein Data Bank (PDB) files
Parameters
----------
filename : str
The filename to open. A path to a file on disk.
mode : {'r', 'w'}
The mode in which to open the file, either 'r' for read or 'w' for write.
force_overwrite : bool
If opened in write mode, and a file by the name of `filename` already
exists on disk, should we overwrite it?
Attributes
----------
positions : np.ndarray, shape=(n_frames, n_atoms, 3)
topology : mdtraj.Topology
closed : bool
Notes
-----
When writing pdb files, mdtraj follows the PDB3.0 standard as closely as
possible. During *reading* however, we try to be more lenient. For instance,
we will parse common nonstandard atom names during reading, and convert them
into the standard names. The replacement table used by mdtraj is at
{mdtraj_source}/formats/pdb/data/pdbNames.xml.
See Also
--------
mdtraj.load_pdb : High-level wrapper that returns a ``md.Trajectory``
"""
distance_unit = 'angstroms'
_residueNameReplacements = {}
_atomNameReplacements = {}
_chain_names = [chr(ord('A') + i) for i in range(26)]
def __init__(self, filename, mode='r', force_overwrite=True):
self._open = False
self._file = None
self._topology = None
self._positions = None
self._mode = mode
self._last_topology = None
if mode == 'r':
PDBTrajectoryFile._loadNameReplacementTables()
if _is_url(filename):
self._file = urlopen(filename)
if filename.lower().endswith('.gz'):
if six.PY3:
self._file = gzip.GzipFile(fileobj=self._file)
else:
self._file = gzip.GzipFile(fileobj=six.StringIO(
self._file.read()))
if six.PY3:
self._file = six.StringIO(self._file.read().decode('utf-8'))
else:
if filename.lower().endswith('.gz'):
self._file = gzip.open(filename, 'r')
self._file = six.StringIO(self._file.read().decode('utf-8'))
else:
self._file = open(filename, 'r')
self._read_models()
elif mode == 'w':
self._header_written = False
self._footer_written = False
if os.path.exists(filename) and not force_overwrite:
raise IOError('"%s" already exists' % filename)
self._file = open(filename, 'w')
else:
raise ValueError("invalid mode: %s" % mode)
self._open = True
def write(self, positions, topology, modelIndex=None, unitcell_lengths=None,
unitcell_angles=None, bfactors=None):
"""Write a PDB file to disk
Parameters
----------
positions : array_like
The list of atomic positions to write.
topology : mdtraj.Topology
The Topology defining the model to write.
modelIndex : {int, None}
If not None, the model will be surrounded by MODEL/ENDMDL records
with this index
unitcell_lengths : {tuple, None}
Lengths of the three unit cell vectors, or None for a non-periodic system
unitcell_angles : {tuple, None}
Angles between the three unit cell vectors, or None for a non-periodic system
bfactors : array_like, default=None, shape=(n_atoms,)
Save bfactors with pdb file. Should contain a single number for
each atom in the topology
"""
if not self._mode == 'w':
raise ValueError('file not opened for writing')
if not self._header_written:
self._write_header(unitcell_lengths, unitcell_angles)
self._header_written = True
if ilen(topology.atoms) != len(positions):
raise ValueError('The number of positions must match the number of atoms')
if np.any(np.isnan(positions)):
raise ValueError('Particle position is NaN')
if np.any(np.isinf(positions)):
raise ValueError('Particle position is infinite')
self._last_topology = topology # Hack to save the topology of the last frame written, allows us to output CONECT entries in write_footer()
if bfactors is None:
bfactors = ['{0:5.2f}'.format(0.0)] * len(positions)
else:
if (np.max(bfactors) >= 100) or (np.min(bfactors) <= -10):
raise ValueError("bfactors must be in (-10, 100)")
bfactors = ['{0:5.2f}'.format(b) for b in bfactors]
atomIndex = 1
posIndex = 0
if modelIndex is not None:
print("MODEL %4d" % modelIndex, file=self._file)
for (chainIndex, chain) in enumerate(topology.chains):
chainName = self._chain_names[chainIndex % len(self._chain_names)]
residues = list(chain.residues)
for (resIndex, res) in enumerate(residues):
if len(res.name) > 3:
resName = res.name[:3]
else:
resName = res.name
for atom in res.atoms:
if len(atom.name) < 4 and atom.name[:1].isalpha() and (atom.element is None or len(atom.element.symbol) < 2):
atomName = ' '+atom.name
elif len(atom.name) > 4:
atomName = atom.name[:4]
else:
atomName = atom.name
coords = positions[posIndex]
if atom.element is not None:
symbol = atom.element.symbol
else:
symbol = ' '
line = "ATOM %5d %-4s %3s %s%4d %s%s%s 1.00 %s %2s " % (
atomIndex % 100000, atomName, resName, chainName,
(res.resSeq) % 10000, _format_83(coords[0]),
_format_83(coords[1]), _format_83(coords[2]),
bfactors[posIndex], symbol)
assert len(line) == 80, 'Fixed width overflow detected'
print(line, file=self._file)
posIndex += 1
atomIndex += 1
if resIndex == len(residues)-1:
print("TER %5d %3s %s%4d" % (atomIndex, resName, chainName, res.resSeq), file=self._file)
atomIndex += 1
if modelIndex is not None:
print("ENDMDL", file=self._file)
def _write_header(self, unitcell_lengths, unitcell_angles, write_metadata=True):
"""Write out the header for a PDB file.
Parameters
----------
unitcell_lengths : {tuple, None}
The lengths of the three unitcell vectors, ``a``, ``b``, ``c``
unitcell_angles : {tuple, None}
The angles between the three unitcell vectors, ``alpha``,
``beta``, ``gamma``
"""
if not self._mode == 'w':
raise ValueError('file not opened for writing')
if unitcell_lengths is None and unitcell_angles is None:
return
if unitcell_lengths is not None and unitcell_angles is not None:
if not len(unitcell_lengths) == 3:
raise ValueError('unitcell_lengths must be length 3')
if not len(unitcell_angles) == 3:
raise ValueError('unitcell_angles must be length 3')
else:
            raise ValueError('either unitcell_lengths and unitcell_angles '
                             'should both be specified, or neither')
box = list(unitcell_lengths) + list(unitcell_angles)
assert len(box) == 6
if write_metadata:
print("REMARK 1 CREATED WITH MDTraj %s, %s" % (version.version, str(date.today())), file=self._file)
print("CRYST1%9.3f%9.3f%9.3f%7.2f%7.2f%7.2f P 1 1 " % tuple(box), file=self._file)
def _write_footer(self):
if not self._mode == 'w':
raise ValueError('file not opened for writing')
# Identify bonds that should be listed as CONECT records.
standardResidues = ['ALA', 'ASN', 'CYS', 'GLU', 'HIS', 'LEU', 'MET', 'PRO', 'THR', 'TYR',
'ARG', 'ASP', 'GLN', 'GLY', 'ILE', 'LYS', 'PHE', 'SER', 'TRP', 'VAL',
'A', 'G', 'C', 'U', 'I', 'DA', 'DG', 'DC', 'DT', 'DI', 'HOH']
conectBonds = []
if self._last_topology is not None:
for atom1, atom2 in self._last_topology.bonds:
if atom1.residue.name not in standardResidues or atom2.residue.name not in standardResidues:
conectBonds.append((atom1, atom2))
elif atom1.name == 'SG' and atom2.name == 'SG' and atom1.residue.name == 'CYS' and atom2.residue.name == 'CYS':
conectBonds.append((atom1, atom2))
if len(conectBonds) > 0:
# Work out the index used in the PDB file for each atom.
atomIndex = {}
nextAtomIndex = 0
prevChain = None
for chain in self._last_topology.chains:
for atom in chain.atoms:
if atom.residue.chain != prevChain:
nextAtomIndex += 1
prevChain = atom.residue.chain
atomIndex[atom] = nextAtomIndex
nextAtomIndex += 1
# Record which other atoms each atom is bonded to.
atomBonds = {}
for atom1, atom2 in conectBonds:
index1 = atomIndex[atom1]
index2 = atomIndex[atom2]
if index1 not in atomBonds:
atomBonds[index1] = []
if index2 not in atomBonds:
atomBonds[index2] = []
atomBonds[index1].append(index2)
atomBonds[index2].append(index1)
# Write the CONECT records.
for index1 in sorted(atomBonds):
bonded = atomBonds[index1]
while len(bonded) > 4:
print("CONECT%5d%5d%5d%5d" % (index1, bonded[0], bonded[1], bonded[2]), file=self._file)
del bonded[:4]
line = "CONECT%5d" % index1
for index2 in bonded:
line = "%s%5d" % (line, index2)
print(line, file=self._file)
print("END", file=self._file)
self._footer_written = True
@classmethod
def set_chain_names(cls, values):
"""Set the cycle of chain names used when writing PDB files
When writing PDB files, PDBTrajectoryFile translates each chain's
index into a name -- the name is what's written in the file. By
default, chains are named with the letters A-Z.
Parameters
----------
values : list
            A list of characters (strings of length 1) that the PDB writer will
cycle through to choose chain names.
"""
for item in values:
            if not (isinstance(item, six.string_types) and len(item) == 1):
raise TypeError('Names must be a single character string')
cls._chain_names = values
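    # For example (hypothetical values): after
    # PDBTrajectoryFile.set_chain_names(['A', 'B']), chain indices 0, 1, 2, 3
    # are written as chains A, B, A, B (the list is cycled via modulo).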
@property
def positions(self):
"""The cartesian coordinates of all of the atoms in each frame. Available when a file is opened in mode='r'
"""
return self._positions
@property
def topology(self):
"""The topology from this PDB file. Available when a file is opened in mode='r'
"""
return self._topology
@property
def unitcell_lengths(self):
"The unitcell lengths (3-tuple) in this PDB file. May be None"
return self._unitcell_lengths
@property
def unitcell_angles(self):
"The unitcell angles (3-tuple) in this PDB file. May be None"
return self._unitcell_angles
@property
def closed(self):
"Whether the file is closed"
return not self._open
def close(self):
"Close the PDB file"
if self._mode == 'w' and not self._footer_written:
self._write_footer()
if self._open:
self._file.close()
self._open = False
def _read_models(self):
if not self._mode == 'r':
raise ValueError('file not opened for reading')
self._topology = Topology()
pdb = PdbStructure(self._file, load_all_models=True)
atomByNumber = {}
for chain in pdb.iter_chains():
c = self._topology.add_chain()
for residue in chain.iter_residues():
resName = residue.get_name()
if resName in PDBTrajectoryFile._residueNameReplacements:
resName = PDBTrajectoryFile._residueNameReplacements[resName]
r = self._topology.add_residue(resName, c, residue.number)
if resName in PDBTrajectoryFile._atomNameReplacements:
atomReplacements = PDBTrajectoryFile._atomNameReplacements[resName]
else:
atomReplacements = {}
for atom in residue.atoms:
atomName = atom.get_name()
if atomName in atomReplacements:
atomName = atomReplacements[atomName]
atomName = atomName.strip()
element = atom.element
if element is None:
element = self._guess_element(atomName, residue)
newAtom = self._topology.add_atom(atomName, element, r, serial=atom.serial_number)
atomByNumber[atom.serial_number] = newAtom
# load all of the positions (from every model)
_positions = []
for model in pdb.iter_models(use_all_models=True):
coords = []
for chain in model.iter_chains():
for residue in chain.iter_residues():
for atom in residue.atoms:
coords.append(atom.get_position())
_positions.append(coords)
if not all(len(f) == len(_positions[0]) for f in _positions):
raise ValueError('PDB Error: All MODELs must contain the same number of ATOMs')
self._positions = np.array(_positions)
## The atom positions read from the PDB file
self._unitcell_lengths = pdb.get_unit_cell_lengths()
self._unitcell_angles = pdb.get_unit_cell_angles()
self._topology.create_standard_bonds()
self._topology.create_disulfide_bonds(self.positions[0])
# Add bonds based on CONECT records.
connectBonds = []
for connect in pdb.models[0].connects:
i = connect[0]
for j in connect[1:]:
if i in atomByNumber and j in atomByNumber:
connectBonds.append((atomByNumber[i], atomByNumber[j]))
if len(connectBonds) > 0:
# Only add bonds that don't already exist.
existingBonds = set(self._topology.bonds)
for bond in connectBonds:
if bond not in existingBonds and (bond[1], bond[0]) not in existingBonds:
self._topology.add_bond(bond[0], bond[1])
existingBonds.add(bond)
@staticmethod
def _loadNameReplacementTables():
"""Load the list of atom and residue name replacements."""
if len(PDBTrajectoryFile._residueNameReplacements) == 0:
tree = etree.parse(os.path.join(os.path.dirname(__file__), 'data', 'pdbNames.xml'))
allResidues = {}
proteinResidues = {}
nucleicAcidResidues = {}
for residue in tree.getroot().findall('Residue'):
name = residue.attrib['name']
if name == 'All':
PDBTrajectoryFile._parseResidueAtoms(residue, allResidues)
elif name == 'Protein':
PDBTrajectoryFile._parseResidueAtoms(residue, proteinResidues)
elif name == 'Nucleic':
PDBTrajectoryFile._parseResidueAtoms(residue, nucleicAcidResidues)
for atom in allResidues:
proteinResidues[atom] = allResidues[atom]
nucleicAcidResidues[atom] = allResidues[atom]
for residue in tree.getroot().findall('Residue'):
name = residue.attrib['name']
for id in residue.attrib:
if id == 'name' or id.startswith('alt'):
PDBTrajectoryFile._residueNameReplacements[residue.attrib[id]] = name
if 'type' not in residue.attrib:
atoms = copy(allResidues)
elif residue.attrib['type'] == 'Protein':
atoms = copy(proteinResidues)
elif residue.attrib['type'] == 'Nucleic':
atoms = copy(nucleicAcidResidues)
else:
atoms = copy(allResidues)
PDBTrajectoryFile._parseResidueAtoms(residue, atoms)
PDBTrajectoryFile._atomNameReplacements[name] = atoms
def _guess_element(self, atom_name, residue):
"Try to guess the element name"
upper = atom_name.upper()
if upper.startswith('CL'):
element = elem.chlorine
elif upper.startswith('NA'):
element = elem.sodium
elif upper.startswith('MG'):
element = elem.magnesium
elif upper.startswith('BE'):
element = elem.beryllium
elif upper.startswith('LI'):
element = elem.lithium
elif upper.startswith('K'):
element = elem.potassium
elif upper.startswith('ZN'):
element = elem.zinc
elif len(residue) == 1 and upper.startswith('CA'):
element = elem.calcium
# TJL has edited this. There are a few issues here. First,
# parsing for the element is non-trivial, so I do my best
# below. Second, there is additional parsing code in
# pdbstructure.py, and I am unsure why it doesn't get used
# here...
elif len(residue) > 1 and upper.startswith('CE'):
            element = elem.carbon  # (probably) not Selenium...
elif len(residue) > 1 and upper.startswith('CD'):
element = elem.carbon # (probably) not Cadmium...
elif residue.name in ['TRP', 'ARG', 'GLN', 'HIS'] and upper.startswith('NE'):
element = elem.nitrogen # (probably) not Neon...
elif residue.name in ['ASN'] and upper.startswith('ND'):
            element = elem.nitrogen  # (probably) not Neodymium...
elif residue.name == 'CYS' and upper.startswith('SG'):
element = elem.sulfur # (probably) not SG...
else:
try:
element = elem.get_by_symbol(atom_name[0])
except KeyError:
try:
symbol = atom_name[0:2].strip().rstrip("AB0123456789").lstrip("0123456789")
element = elem.get_by_symbol(symbol)
except KeyError:
element = None
return element
@staticmethod
def _parseResidueAtoms(residue, map):
for atom in residue.findall('Atom'):
name = atom.attrib['name']
for id in atom.attrib:
map[atom.attrib[id]] = name
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def __len__(self):
"Number of frames in the file"
if str(self._mode) != 'r':
raise NotImplementedError('len() only available in mode="r" currently')
if not self._open:
raise ValueError('I/O operation on closed file')
return len(self._positions)
def _format_83(f):
"""Format a single float into a string of width 8, with ideally 3 decimal
places of precision. If the number is a little too large, we can
gracefully degrade the precision by lopping off some of the decimal
places. If it's much too large, we throw a ValueError"""
if -999.999 < f < 9999.999:
return '%8.3f' % f
if -9999999 < f < 99999999:
return ('%8.3f' % f)[:8]
    raise ValueError('coordinate "%s" could not be represented '
                     'in a width-8 field' % f)
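# Hypothetical sanity checks for _format_83 (illustrative only; not part of
# the original module):
def _format_83_examples():
    assert _format_83(1.0) == '   1.000'        # normal case: width 8, 3 decimals
    assert _format_83(12345.678) == '12345.67'  # too wide: precision gracefully degraded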
|
kyleabeauchamp/mdtraj
|
mdtraj/formats/pdb/pdbfile.py
|
Python
|
lgpl-2.1
| 27,393
|
[
"MDTraj",
"OpenMM"
] |
fc056e23710af75cb352fae74e1b717fc01ae183f488ae247be552c1cedede19
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
""" Miscellaneous ressources. """
__all__ = ['testValidProgram', 'copyfile', 'Changedir',
'read_input', 'exec_input', 'load',
'RelativePath', 'LockFile', 'open_exclusive', 'translate_to_regex',
'mkdtemp', 'Redirect', 'local_path']
from types import ModuleType
from sys import version_info
from .changedir import Changedir
from .relativepath import RelativePath
from .lockfile import LockFile, open_exclusive
testValidProgram = None
"""
Validation test program name
"""
def local_path(path=None, *args):
""" Returns a py.path, expanding environment variables """
from os.path import expandvars
from py.path import local
if path is None:
return local(*args)
if isinstance(path, str):
return local(expandvars(path), expanduser=True).join(*args)
return path.join(*args)
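# Hedged sketch for local_path(): expands environment variables and '~',
# returning a py.path.local (paths hypothetical).
#
#     local_path('$HOME/scratch', 'run1')   # ~/scratch/run1 as py.path.local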
def chdir(path=None, *args):
""" Context for changing directory
Ensures the directory exists first.
"""
path = local_path(path, *args)
path.ensure(dir=True)
return path.as_cwd()
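# Hedged usage sketch for chdir(); the path is hypothetical.
#
#     with chdir('/tmp/pylada_work'):       # creates the directory if needed
#         open('log.txt', 'w').close()      # cwd is /tmp/pylada_work here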
def setTestValidProgram(pgm):
import os
global testValidProgram
    if pgm is None:
testValidProgram = None
else:
testValidProgram = os.path.expanduser(pgm)
def _copyfile_impl(src, dest):
""" Copies files by hand.
Makes sure that files are actually copied to disk, as opposed to
buffered. Does not check for existence or anything.
"""
from os import stat, fsync
stepsize = 2**20
size = stat(src).st_size
if size == 0:
with open(dest, 'wb') as outfile:
pass
return
with open(dest, 'wb') as outfile:
with open(src, 'rb') as infile:
steps = [stepsize] * (size // stepsize)
if size % stepsize != 0:
steps += [size % stepsize]
for step in steps:
buffer = infile.read(step)
                if not buffer:  # read() returns b'' at EOF, never None
                    break
outfile.write(buffer)
# makes sure stuff is written to disk prior to returning.
outfile.flush()
fsync(outfile.fileno())
def copyfile(src, dest=None, nothrow=None, symlink=False, aslink=False, nocopyempty=False):
""" Copy ``src`` file onto ``dest`` directory or file.
:param src:
Source file.
:param dest:
Destination file or directory.
:param nothrow:
        Throwing is disabled selectively depending on the content of nothrow:
        - exists: will not throw if src does not exist.
        - isfile: will not throw if src is not a file.
- same: will not throw if src and dest are the same.
- none: ``src`` can be None.
- null: ``src`` can be '/dev/null'.
- never: will never throw.
    :param symlink:
        Creates a link rather than an actual hard copy. Symlinks are
        created with relative paths given starting from the directory of
        ``dest``. Defaults to False.
    :param aslink:
        Creates a link rather than an actual hard copy *if* ``src`` is
        itself a link. Links to the file which ``src`` points to, not to
        ``src`` itself. Defaults to False.
    :param nocopyempty:
        Does not perform the copy if the file is empty. Defaults to False.
This function fails selectively, depending on what is in ``nothrow`` list.
"""
try:
from os import getcwd, symlink as ln, remove
from os.path import isdir, isfile, samefile, exists, basename, dirname,\
join, islink, realpath, relpath, getsize, abspath
# sets up nothrow options.
if nothrow is None:
nothrow = []
if isinstance(nothrow, str):
nothrow = nothrow.split()
        # after the split above, nothrow is always a list, so membership
        # (not equality with the string 'all') is the correct check
        if 'all' in nothrow:
            nothrow = 'exists', 'same', 'isfile', 'none', 'null'
nothrow = [u.lower() for u in nothrow]
# checks and normalizes input.
if src is None:
if 'none' in nothrow:
return False
raise IOError("Source is None.")
if dest is None:
dest = getcwd()
if dest == '/dev/null':
return True
if src == '/dev/null':
if 'null' in nothrow:
return False
raise IOError("Source is '/dev/null' but Destination is {0}.".format(dest))
# checks that input source file exists.
if not exists(src):
if 'exists' in nothrow:
return False
raise IOError("{0} does not exist.".format(src))
src = abspath(realpath(src))
if not isfile(src):
if 'isfile' in nothrow:
return False
raise IOError("{0} is not a file.".format(src))
# makes destination a file.
if exists(dest) and isdir(dest):
dest = join(dest, basename(src))
# checks if destination file and source file are the same.
if exists(dest) and samefile(src, dest):
if 'same' in nothrow:
return False
raise IOError("{0} and {1} are the same file.".format(src, dest))
if nocopyempty and isfile(src):
if getsize(src) == 0:
return
if aslink and islink(src):
symlink, src = True, realpath(src)
if symlink:
if exists(dest):
remove(dest)
src = realpath(abspath(src))
dest = realpath(abspath(dest))
if relpath(src, dirname(dest)).count("../") == relpath(src, '/').count("../"):
ln(src, realpath(dest))
else:
with chdir(local_path(dest).dirname):
ln(relpath(src, dirname(dest)), basename(dest))
else:
_copyfile_impl(src, dest)
except:
if 'never' in nothrow:
return False
raise
else:
return True
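# Hedged usage sketches for copyfile(); all filenames are hypothetical.
#
#     copyfile('INCAR', '/tmp/workdir')                   # copy into directory
#     copyfile('POSCAR', 'POSCAR.bak', nothrow='exists')  # skip if missing
#     copyfile('CHGCAR', 'CHGCAR.lnk', symlink=True)      # relative symlink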
class Input(ModuleType):
""" Fake class which will be updated with the local dictionary. """
def __init__(self, name="pylada_input"):
""" Initializes input module. """
super(Input, self).__init__(name, "Input module for pylada scripts.")
def __getattr__(self, name):
raise AttributeError("All out of cheese!\n"
"Required input parameter '{0}' not found in {1}."
.format(name, self.__name__))
def __delattr__(self, name):
raise RuntimeError("Cannot delete object from input namespace.")
def __setattr__(self, name, value):
raise RuntimeError("Cannot set/change object in input namespace.")
def update(self, other):
if hasattr(other, '__dict__'):
other = other.__dict__
for key, value in other.items():
if key[0] == '_':
continue
super(Input, self).__setattr__(key, value)
@property
def __all__(self):
return list([u for u in self.__dict__.keys() if u[0] != '_'])
def __contains__(self, name):
return name in self.__dict__
def read_input(filename='input.py', global_dict=None, local_dict=None, paths=None, comm=None):
""" Reads and executes input script and returns local dictionary (as namespace instance). """
from os.path import exists, basename
assert exists(filename), IOError('File {0} does not exist.'.format(filename))
with open(filename, 'r') as file:
string = file.read()
return exec_input(string, global_dict, local_dict, paths, basename(filename))
def exec_input(script, global_dict=None, local_dict=None,
paths=None, name=None):
""" Executes input script and returns local dictionary (as namespace instance). """
# stuff to import into script.
from os import environ
from os.path import abspath, expanduser
from math import pi
from numpy import array, matrix, dot, sqrt, abs, ceil
from numpy.linalg import norm, det
from .. import crystal
from . import Input
from pylada import logger
import quantities
logger.debug("misc/init: exec_input: entry")
# Add some names to execution environment.
if global_dict is None:
global_dict = {}
global_dict.update({"environ": environ, "pi": pi, "array": array, "matrix": matrix, "dot": dot,
"norm": norm, "sqrt": sqrt, "ceil": ceil, "abs": abs, "det": det,
"expanduser": expanduser, "load": load})
for key, value in quantities.__dict__.items():
if key[0] != '_' and key not in global_dict:
global_dict[key] = value
for key in crystal.__all__:
global_dict[key] = getattr(crystal, key)
if local_dict is None:
local_dict = {}
# Executes input script.
logger.debug('misc/init: exec_input: ========== start script ==========')
logger.debug(script)
logger.debug('misc/init: exec_input: ========== end script ==========')
exec(script, global_dict, local_dict)
# Makes sure expected paths are absolute.
if paths is not None:
for path in paths:
if path not in local_dict:
continue
local_dict[path] = abspath(expanduser(local_dict[path]))
if name is None:
name = 'None'
result = Input(name)
result.update(local_dict)
return result
def load(data, *args, **kwargs):
""" Loads data from the data files. """
from os import environ
from os.path import dirname, exists, join
if "directory" in kwargs:
raise KeyError("directory is a reserved keyword of load")
# find all possible data directories
directories = []
if "data_directory" in globals():
directory = globals()["data_directory"]
if hasattr(directory, "__iter__"):
directories.extend(directory)
else:
directories.append(directory)
if "PYLADA_DATA_DIRECTORY" in environ:
directories.extend(environ["PYLADA_DATA_DIRECTORY"].split(":"))
# then looks for data file.
if data.rfind(".py") == -1:
data += ".py"
for directory in directories:
if exists(join(directory, data)):
kwargs["directory"] = dirname(join(directory, data))
result = {}
exec(compile(open(join(directory, data)).read(), join(directory, data), 'exec'), {}, result)
return result["init"](*args, **kwargs)
raise IOError("Could not find data ({0}).".format(data))
def add_setter(method, docstring=None):
""" Adds an input-like setter property. """
def _not_available(self): raise RuntimeError("Error: No cheese available.")
if docstring is None and hasattr(method, "__doc__"):
docstring = method.__doc__
return property(fget=_not_available, fset=method, doc=docstring)
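# Minimal sketch of add_setter in use (class and attribute hypothetical):
# reading the property raises, writing dispatches to the decorated method.
#
#     class Functional(object):
#         @add_setter
#         def cutoff(self, value):
#             """ Sets a cutoff parameter. """
#             self._cutoff = value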
def import_dictionary(self, modules=None):
""" Creates a dictionary of import modules. """
if modules is None:
modules = {}
avoids = ['__builtin__', 'quantities.quantity']
if self.__class__.__module__ not in avoids:
if self.__class__.__module__ not in modules:
modules[self.__class__.__module__] = {self.__class__.__name__}
else:
modules[self.__class__.__module__].add(self.__class__.__name__)
if not hasattr(self, '__dict__'):
return modules
for value in self.__dict__.values():
class_, module_ = value.__class__.__name__, value.__class__.__module__
if module_ in avoids:
continue
if module_ in modules:
modules[module_].add(class_)
else:
modules[module_] = {class_}
return modules
def import_header_string(modules):
""" Creates string from dictionary of import modules. """
result = ''
for key, values in modules.items():
result += "from {0} import {1}\n".format(key, ", ".join(values))
return result
def translate_to_regex(pat):
""" Translates a pattern from unix to re.
Compared to fnmatch.translate, doesn't use '.', but rather '[^/]'.
And doesn't add the tail that fnmatch.translate does.
    Otherwise, the code is taken from fnmatch.translate.
"""
from re import escape
i, n = 0, len(pat)
res = ''
while i < n:
c = pat[i]
i = i + 1
if c == '*':
res = res + '[^/]*'
elif c == '?':
res = res + '[^/]'
elif c == '[':
j = i
if j < n and pat[j] == '!':
j = j + 1
if j < n and pat[j] == ']':
j = j + 1
while j < n and pat[j] != ']':
j = j + 1
if j >= n:
res = res + '\\['
else:
stuff = pat[i:j].replace('\\', '\\\\')
i = j + 1
if stuff[0] == '!':
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
                res = '{0}[{1}]'.format(res, stuff)
else:
res = res + escape(c)
return res
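# Example sketch: unlike fnmatch.translate, '*' and '?' never match across '/'.
#
#     translate_to_regex('src/*.py')   # -> 'src/[^/]*\.py'
#     # (up to how re.escape treats '/' on different Python versions)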
def latest_file(*args):
""" Path of latest file.
    Checks each argument to see whether it exists, is a file, and is
    non-empty. If more than one qualifies, returns the most recently
    modified; if none do, returns None.
:param *args:
Path to files for which to perform comparison.
:returns: path to the latest file or None
"""
from os.path import exists, getsize, isfile
from os import stat
from operator import itemgetter
if len(args) == 0:
return None
dummy = []
for filename in args:
path = RelativePath(filename).path
if not exists(path):
continue
if not isfile(path):
continue
if getsize(path) == 0:
continue
dummy.append((path, stat(path).st_mtime))
if len(dummy) == 0:
return None
dummy = sorted(dummy, key=itemgetter(1))
return dummy[-1][0]
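# Hedged usage sketch for latest_file(); filenames are hypothetical.
#
#     path = latest_file('OUTCAR', 'relax/OUTCAR')
#     # returns whichever non-empty regular file was modified last, or None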
def mkdtemp(suffix='', prefix='', dir=None):
""" Creates and returns temporary directory.
Makes it easier to get all Pylada tmp directories in the same place,
while retaining a certain amount of flexibility when on a
    supercomputer. It first checks for a PBS_TMPDIR environment variable.
    If that does not exist, it checks for a PYLADA_TMPDIR environment
    variable. If that does not exist either, it checks whether
    :py:data:`~pylada.global_tmpdir` is not None. Failing all of these,
    it uses the directory provided in the input.
Once ``dir`` has been determined, it calls python's mkdtemp.
:param suffix: A suffix to the temporary directory
:param prefix: A prefix to the temporary directory.
:param dir: Last alternative for root of tmp directories.
"""
from os import environ
from tempfile import mkdtemp as pymkdtemp
from datetime import datetime
from .. import global_tmpdir
rootdir = environ.get('PBS_TMPDIR',
environ.get('PYLADA_TMPDIR', global_tmpdir))
if rootdir is None:
rootdir = dir
rootdir = RelativePath(rootdir).path
if len(prefix) == 0:
prefix = str(datetime.today())
else:
prefix = '{0}_{1}'.format(str(datetime.today()).replace(' ', '-'),
prefix)
return pymkdtemp(prefix=prefix, suffix=suffix, dir=rootdir)
class Redirect:
""" Redirects python input, output, error.
Usage is as follows:
        .. code-block:: python
with Redirect('something.out', ['out', 'err']):
print 'something'
The above will redirect the python output and error to 'something.out'
until the close of the ``with`` statement.
"""
def __init__(self, filename, units='out', append=False):
""" Creates a redirection context. """
from ..misc import Sequence
from ..error import input as InputError
        # Treat a bare string as a single unit: str is itself a Sequence, so
        # without this check the default units='out' would split into letters.
        units = {units} if isinstance(units, str) else (
            set(units) if isinstance(units, Sequence) else {units})
if len(units - {'in', 'out', 'err'}) != 0:
raise InputError('Redirect: input should be one of "in", "out", "err".')
self.units = units
self.filename = filename
self.append = append
def __enter__(self):
from os.path import abspath
import sys
self.old = {}
if 'in' in self.units:
self.old['in'] = sys.stdin
if 'out' in self.units:
self.old['out'] = sys.stdout
if 'err' in self.units:
self.old['err'] = sys.stderr
self.file = open(self.filename if len(self.filename) else "/dev/null",
"a" if self.append else "w")
if 'in' in self.units:
sys.stdin = self.file
if 'out' in self.units:
sys.stdout = self.file
if 'err' in self.units:
sys.stderr = self.file
return abspath(self.file.name)
def __exit__(self, *wargs):
import sys
if 'in' in self.units and 'in' in self.old:
sys.stdin = self.old.pop('in')
if 'err' in self.units and 'err' in self.old:
sys.stderr = self.old.pop('err')
if 'out' in self.units and 'out' in self.old:
sys.stdout = self.old.pop('out')
self.file.close()
del self.old
del self.file
if version_info[0] == 2:
from collections import Sequence, Iterable, MutableSequence, Mapping, MutableMapping
cmdl_input = raw_input
else:
from typing import Sequence, Iterable, MutableSequence, Mapping, MutableMapping
cmdl_input = input
|
pylada/pylada-light
|
src/pylada/misc/__init__.py
|
Python
|
gpl-3.0
| 18,803
|
[
"CRYSTAL",
"VASP"
] |
818bdbf247ec80439fb885dd245b83114a26971d8e39791c93656cb6b9857242
|
"""Pipeline utilities to retrieve FASTQ formatted files for processing.
"""
import os
import sys
from bcbio import bam, broad, utils
from bcbio.bam import fastq
from bcbio.bam import cram
from bcbio.distributed import objectstore
from bcbio.pipeline import alignment
from bcbio.utils import file_exists, safe_makedir, splitext_plus
from bcbio.provenance import do
from bcbio.distributed.transaction import file_transaction
def get_fastq_files(data):
"""Retrieve fastq files for the given lane, ready to process.
"""
assert "files" in data, "Did not find `files` in input; nothing to process"
ready_files = []
should_gzip = True
# Bowtie does not accept gzipped fastq
if 'bowtie' in data['reference'].keys():
should_gzip = False
for fname in data["files"]:
if fname.endswith(".bam"):
if _pipeline_needs_fastq(data["config"], data):
ready_files = _convert_bam_to_fastq(fname, data["dirs"]["work"],
data, data["dirs"], data["config"])
else:
ready_files = [fname]
elif objectstore.is_remote(fname):
ready_files.append(fname)
else:
ready_files.append(fname)
ready_files = [x for x in ready_files if x is not None]
if should_gzip:
ready_files = [_gzip_fastq(x) for x in ready_files]
for in_file in ready_files:
if not objectstore.is_remote(in_file):
assert os.path.exists(in_file), "%s does not exist." % in_file
return ((ready_files[0] if len(ready_files) > 0 else None),
(ready_files[1] if len(ready_files) > 1 else None))
def _gzip_fastq(in_file):
"""
gzip a fastq file if it is not already gzipped, handling conversion
from bzip to gzipped files
"""
if fastq.is_fastq(in_file) and not objectstore.is_remote(in_file):
if utils.is_bzipped(in_file):
return _bzip_gzip(in_file)
elif not utils.is_gzipped(in_file):
gzipped_file = in_file + ".gz"
if file_exists(gzipped_file):
return gzipped_file
message = "gzipping {in_file}.".format(in_file=in_file)
with file_transaction(gzipped_file) as tx_gzipped_file:
do.run("gzip -c {in_file} > {tx_gzipped_file}".format(**locals()),
message)
return gzipped_file
return in_file
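# Sketch of the flow above (filenames hypothetical): bzip2 inputs are
# converted, plain fastqs are gzipped alongside the original, and anything
# already gzipped or remote passes through untouched.
#
#     _gzip_fastq("sample.fq")     # -> "sample.fq.gz" (runs gzip -c)
#     _gzip_fastq("sample.fq.gz")  # -> "sample.fq.gz" (unchanged)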
def _bzip_gzip(in_file):
"""
convert from bz2 to gz
"""
if not utils.is_bzipped(in_file):
return in_file
base, first_ext = os.path.splitext(in_file)
gzipped_file = base + ".gz"
if (fastq.is_fastq(base) and
not objectstore.is_remote(in_file)):
if file_exists(gzipped_file):
return gzipped_file
message = "gzipping {in_file}.".format(in_file=in_file)
with file_transaction(gzipped_file) as tx_gzipped_file:
do.run("bunzip2 -c {in_file} | gzip > {tx_gzipped_file}".format(**locals()), message)
return gzipped_file
return in_file
def _pipeline_needs_fastq(config, data):
"""Determine if the pipeline can proceed with a BAM file, or needs fastq conversion.
"""
aligner = config["algorithm"].get("aligner")
support_bam = aligner in alignment.metadata.get("support_bam", [])
return aligner and not support_bam
def _convert_bam_to_fastq(in_file, work_dir, data, dirs, config):
"""Convert BAM input file into FASTQ files.
"""
out_dir = safe_makedir(os.path.join(work_dir, "fastq_convert"))
qual_bin_method = config["algorithm"].get("quality_bin")
if (qual_bin_method == "prealignment" or
(isinstance(qual_bin_method, list) and "prealignment" in qual_bin_method)):
out_bindir = safe_makedir(os.path.join(out_dir, "qualbin"))
in_file = cram.illumina_qual_bin(in_file, data["sam_ref"], out_bindir, config)
out_files = [os.path.join(out_dir, "{0}_{1}.fastq".format(
os.path.splitext(os.path.basename(in_file))[0], x))
for x in ["1", "2"]]
if bam.is_paired(in_file):
out1, out2 = out_files
else:
out1 = out_files[0]
out2 = None
if not file_exists(out1):
broad_runner = broad.runner_from_config(config)
broad_runner.run_fn("picard_bam_to_fastq", in_file, out1, out2)
if out2 and os.path.getsize(out2) == 0:
out2 = None
return [out1, out2]
def merge(files, out_file, config):
"""merge smartly fastq files. It recognizes paired fastq files."""
pair1 = [fastq_file[0] for fastq_file in files]
if len(files[0]) > 1:
path = splitext_plus(out_file)
pair1_out_file = path[0] + "_R1" + path[1]
pair2 = [fastq_file[1] for fastq_file in files]
pair2_out_file = path[0] + "_R2" + path[1]
_merge_list_fastqs(pair1, pair1_out_file, config)
_merge_list_fastqs(pair2, pair2_out_file, config)
return [pair1_out_file, pair2_out_file]
else:
return _merge_list_fastqs(pair1, out_file, config)
def _merge_list_fastqs(files, out_file, config):
"""merge list of fastq files into one"""
if not all(map(fastq.is_fastq, files)):
raise ValueError("Not all of the files to merge are fastq files: %s " % (files))
assert all(map(utils.file_exists, files)), ("Not all of the files to merge "
"exist: %s" % (files))
if not os.path.exists(out_file):
files = [_bzip_gzip(fn) for fn in files]
if len(files) == 1:
os.symlink(files[0], out_file)
return out_file
gz_files = [_gzip_fastq(fn) for fn in files]
with file_transaction(out_file) as file_txt_out:
files_str = " ".join(list(gz_files))
cmd = "cat {files_str} > {file_txt_out}".format(**locals())
do.run(cmd, "merge fastq files")
return out_file
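# Hedged usage sketch for merge(); filenames and config are hypothetical.
#
#     out = merge([("s1_R1.fq", "s1_R2.fq"), ("s2_R1.fq", "s2_R2.fq")],
#                 "merged.fq", config)
#     # gzips inputs as needed, then concatenates R1 and R2 members
#     # separately into merged_R1.fq and merged_R2.fq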
|
lpantano/bcbio-nextgen
|
bcbio/pipeline/fastq.py
|
Python
|
mit
| 5,917
|
[
"Bowtie"
] |
279f411b1f61d6abfecb03f344db4b0532782b6247128b4021d9589738665955
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2016 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
This module contains a few small sphinx extensions.
They are mainly used to help with the generation
of BPS's own documentation, but some other projects
use them as well, so they are kept here.
"""
import re
import os.path
__version__ = "1.4"
def get_theme_dir():
"return path to directory containing sphinx themes in this package"
    return os.path.abspath(os.path.join(__file__, os.path.pardir, "themes"))
def get_version(release):
"derive short version string from longer release"
    return re.match(r"(\d+\.\d+)", release).group(1)
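# Sketch of get_version() behaviour (inputs hypothetical):
#     get_version("1.4.2")   # -> "1.4"
#     get_version("1.4rc1")  # -> "1.4"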
|
kannon92/psi4
|
doc/sphinxman/source/psi4doc/__init__.py
|
Python
|
gpl-2.0
| 1,500
|
[
"Psi4"
] |
72e610df8c1a34bff15cd98259b83f234f890ca267052bcc116e971d594f566a
|
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import unittest
import ghmm
import ghmmwrapper
import random
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
from hrl_haptic_manipulation_in_clutter_msgs.msg import SkinContact
from hrl_haptic_manipulation_in_clutter_msgs.msg import TaxelArray
from m3skin_ros.msg import TaxelArray as TaxelArray_Meka
from hrl_msgs.msg import FloatArrayBare
from geometry_msgs.msg import Point
from geometry_msgs.msg import Vector3
import sys
# The aliases below (lin, tf, tr) are used later in this script but their
# imports were missing; the module paths are assumed from the hrl_lib / ROS
# ecosystem this code targets.
import numpy.linalg as lin
import tf
import hrl_lib.transforms as tr
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Exploration')
from data_variable_length_force_sample import Fmat_original
def compute_contact_regions(arr_2d, threshold):
mask = arr_2d > threshold
label_im, nb_labels = ni.label(mask)
return label_im, nb_labels
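# Minimal sketch of the labeling step above, with an illustrative 2x2 array:
#
#     import numpy as np
#     import scipy.ndimage as ni
#     arr = np.array([[0.0, 0.3],
#                     [0.3, 0.3]])
#     label_im, nb = ni.label(arr > 0.1)   # nb == 1: one connected region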
def compute_obj_labels(label_im, nb_labels):
i=1
local_nb_label_first_obj = -1
local_nb_label_second_obj = -1
index = np.zeros(nb_labels)
row,col = np.shape(label_im)
while i <= nb_labels:
j=0
while j < row:
k=0
while k < col:
if label_im[j][k] == i:
index[i-1] = index[i-1]+1
k=k+1
j=j+1
i=i+1
temp=0
max_index_1 = np.max(index)
#print max_index_1
while temp < len(index):
if index[temp] == max_index_1 and max_index_1 > 0:
local_nb_label_first_obj = temp+1
temp = temp+1
index[local_nb_label_first_obj-1]=-1
max_index_2 = np.max(index)
#print max_index_2
temp=0
while temp < len(index):
if index[temp] == max_index_2 and max_index_2 > 0:
local_nb_label_second_obj = temp+1
temp = temp+1
return local_nb_label_first_obj, local_nb_label_second_obj, max_index_1, max_index_2
def compute_resultant_force_magnitudes(force_arr, label_im, nb_label):
total_force = ni.sum(force_arr, label_im, nb_label)
return total_force
def compute_max_force(force_arr, label_im, nb_label):
max_force = ni.maximum(force_arr, label_im, nb_label)
return max_force
def compute_center_of_pressure(cx_arr, cy_arr, cz_arr, label_im,
nb_label):
cx = ni.mean(cx_arr, label_im, nb_label)
cy = ni.mean(cy_arr, label_im, nb_label)
cz = ni.mean(cz_arr, label_im, nb_label)
contact_vector = np.column_stack([cx, cy, cz])
return contact_vector
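# Sketch of the per-component mean above (values illustrative):
#
#     import numpy as np
#     import scipy.ndimage as ni
#     cx = np.array([[0., 1.], [2., 3.]])
#     labels = np.array([[1, 1], [0, 2]])
#     ni.mean(cx, labels, 1)   # -> 0.5, the mean over taxels labeled 1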
def track_object_connected_component(cx_arr, cy_arr, cz_arr, r1, t1, label_im, nb_label_first_obj1, nb_label_second_obj1, total_contact_first_obj1, total_contact_second_obj1):
global cop_global_first_obj_prev
global cop_global_second_obj_prev
global iterindex
cop_local_first_obj1 = compute_center_of_pressure(cx_arr,cy_arr,cz_arr,label_im,nb_label_first_obj1)
cop_global_first_obj1 = r1*(cop_local_first_obj1.T) + t1
cop_local_second_obj1 = compute_center_of_pressure(cx_arr,cy_arr,cz_arr,label_im,nb_label_second_obj1)
cop_global_second_obj1 = r1*(cop_local_second_obj1.T) + t1
#print lin.norm(cop_global_first_obj1-cop_global_first_obj_prev)
if (lin.norm(cop_global_first_obj1-cop_global_first_obj_prev) >= lin.norm(cop_global_first_obj1 - cop_global_second_obj_prev)) and (iterindex > 0):
#if (lin.norm(cop_global_second_obj1-cop_global_second_obj_prev) >= lin.norm(cop_global_second_obj1 - cop_global_first_obj_prev)) and (iterindex > 0):
#if ((lin.norm(cop_global_first_obj1-cop_global_first_obj_prev) >= lin.norm(cop_global_first_obj1 - cop_global_second_obj_prev)) or (lin.norm(cop_global_second_obj1-cop_global_second_obj_prev) >= lin.norm(cop_global_second_obj1 - cop_global_first_obj_prev))) and (iterindex > 0):
#if (lin.norm(cop_global_first_obj1-cop_global_first_obj_prev) > 0.005) and (iterindex > 0):
#print "Need to be Exchanged: ", total_contact_first_obj1, " compared to ", total_contact_second_obj1
#print "Need to be Exchanged: ", nb_label_first_obj1, " compared to ", nb_label_second_obj1
temp1 = nb_label_first_obj1
temp2 = total_contact_first_obj1
nb_label_first_obj1 = nb_label_second_obj1
total_contact_first_obj1 = total_contact_second_obj1
nb_label_second_obj1 = temp1
total_contact_second_obj1 = temp2
cop_global_first_obj_prev = cop_global_second_obj1
cop_global_second_obj_prev = cop_global_first_obj1
print "The connected component corresponding to the first object is not the largest"
else:
cop_global_first_obj_prev = cop_global_first_obj1
cop_global_second_obj_prev = cop_global_second_obj1
#print "The largest connected component is the first object"
iterindex = iterindex + 1
return nb_label_first_obj1, nb_label_second_obj1, total_contact_first_obj1, total_contact_second_obj1
def track_object_connected_component_single(cx_arr, cy_arr, cz_arr, r1, t1, label_im, nb_label_first_obj1, nb_label_second_obj1, total_contact_first_obj1, total_contact_second_obj1):
global cop_global_first_obj_prev
global cop_global_second_obj_prev
global iterindex
cop_local_first_obj1 = compute_center_of_pressure(cx_arr,cy_arr,cz_arr,label_im,nb_label_first_obj1)
cop_global_first_obj1 = r1*(cop_local_first_obj1.T) + t1
#print lin.norm(cop_global_first_obj1-cop_global_first_obj_prev)
if (lin.norm(cop_global_first_obj1-cop_global_first_obj_prev) >= lin.norm(cop_global_first_obj1 - cop_global_second_obj_prev)) and (iterindex > 0):
#if (lin.norm(cop_global_second_obj1-cop_global_second_obj_prev) >= lin.norm(cop_global_second_obj1 - cop_global_first_obj_prev)) and (iterindex > 0):
#if ((lin.norm(cop_global_first_obj1-cop_global_first_obj_prev) >= lin.norm(cop_global_first_obj1 - cop_global_second_obj_prev)) or (lin.norm(cop_global_second_obj1-cop_global_second_obj_prev) >= lin.norm(cop_global_second_obj1 - cop_global_first_obj_prev))) and (iterindex > 0):
#if (lin.norm(cop_global_first_obj1-cop_global_first_obj_prev) > 0.005) and (iterindex > 0):
#print "Need to be Exchanged: ", total_contact_first_obj1, " compared to ", total_contact_second_obj1
#print "Need to be Exchanged: ", nb_label_first_obj1, " compared to ", nb_label_second_obj1
temp1 = nb_label_first_obj1
temp2 = total_contact_first_obj1
nb_label_first_obj1 = nb_label_second_obj1
total_contact_first_obj1 = total_contact_second_obj1
nb_label_second_obj1 = temp1
total_contact_second_obj1 = temp2
cop_global_first_obj_prev = cop_global_second_obj_prev
cop_global_second_obj_prev = cop_global_first_obj1
print "The connected component corresponding to the first object is not the largest"
else:
cop_global_first_obj_prev = cop_global_first_obj1
cop_global_second_obj_prev = cop_global_second_obj_prev
#print "The largest connected component is the first object"
iterindex = iterindex + 1
return nb_label_first_obj1, nb_label_second_obj1, total_contact_first_obj1, total_contact_second_obj1
def callback(data, callback_args):
rospy.loginfo('Getting data and Saving pics!')
tf_lstnr = callback_args
sc = SkinContact()
sc.header.frame_id = '/torso_lift_link' # has to be this and no other coord frame.
sc.header.stamp = data.header.stamp
t1, q1 = tf_lstnr.lookupTransform(sc.header.frame_id,
data.header.frame_id,
rospy.Time(0))
t1 = np.matrix(t1).reshape(3,1)
r1 = tr.quaternion_to_matrix(q1)
force_vectors = np.row_stack([data.values_x, data.values_y, data.values_z])
contact_vectors = np.row_stack([data.centers_x, data.centers_y, data.centers_z]).reshape((3,16,24))
fmags = ut.norm(force_vectors)
force_arr = fmags.reshape((16,24))
cx_arr = contact_vectors[0]
cy_arr = contact_vectors[1]
cz_arr = contact_vectors[2]
global max_force_first_obj, max_force_second_obj
label_im, nb_labels = compute_contact_regions(force_arr, 0.1)
nb_label_first_object, nb_label_second_object, total_contact_first_object, total_contact_second_object = compute_obj_labels(label_im,nb_labels)
if (total_contact_first_object > 1) and (total_contact_second_object > 1):
nb_label_first_obj, nb_label_second_obj, total_contact_first_obj, total_contact_second_obj = track_object_connected_component(cx_arr, cy_arr, cz_arr, r1, t1, label_im, nb_label_first_object, nb_label_second_object, total_contact_first_object, total_contact_second_object)
global cop_global_first_obj, cop_global_second_obj
cop_local_first_obj = compute_center_of_pressure(cx_arr,cy_arr,cz_arr,label_im,nb_label_first_obj)
cop_global_first_obj = r1*(cop_local_first_obj.T) + t1
cop_local_second_obj = compute_center_of_pressure(cx_arr,cy_arr,cz_arr,label_im,nb_label_second_obj)
cop_global_second_obj = r1*(cop_local_second_obj.T) + t1
max_force_first_obj = compute_max_force(force_arr,label_im,nb_label_first_obj)
max_force_second_obj = compute_max_force(force_arr,label_im,nb_label_second_obj)
#if (nb_label_first_obj == nb_label_second_object) and (nb_label_second_obj == nb_label_first_object):
#print "New Max. Forces, 1st object: ", max_force_first_obj, " compared to 2nd object: ", max_force_second_obj
#print "Old Max. Forces, 1st object: ", max_force_first_obj_temp, " compared to 2nd object: ", max_force_second_obj_temp, "\n"
elif (total_contact_first_object > 1) and (total_contact_second_object < 2):
nb_label_first_obj, nb_label_second_obj, total_contact_first_obj, total_contact_second_obj = track_object_connected_component_single(cx_arr, cy_arr, cz_arr, r1, t1, label_im, nb_label_first_object, nb_label_second_object, total_contact_first_object, total_contact_second_object)
global cop_global_first_obj, cop_global_second_obj
cop_local_first_obj = compute_center_of_pressure(cx_arr,cy_arr,cz_arr,label_im,nb_label_first_obj)
cop_global_first_obj = r1*(cop_local_first_obj.T) + t1
cop_local_second_obj = compute_center_of_pressure(cx_arr,cy_arr,cz_arr,label_im,nb_label_second_obj)
cop_global_second_obj = r1*(cop_local_second_obj.T) + t1
max_force_first_obj = compute_max_force(force_arr,label_im,nb_label_first_obj)
max_force_second_obj = compute_max_force(force_arr,label_im,nb_label_second_obj)
#if (nb_label_first_obj == nb_label_second_object) and (nb_label_second_obj == nb_label_first_object):
#print "New Max. Forces, 1st object: ", max_force_first_obj, " compared to 2nd object: ", max_force_second_obj
#print "Old Max. Forces, 1st object: ", max_force_first_obj_temp, " compared to 2nd object: ", max_force_second_obj_temp, "\n"
    # NOTE: this condition duplicates the branch above, so this guarded block
    # is unreachable as written; the index checks below suggest it was meant
    # for the case where one of the component labels is absent (-1).
    elif (total_contact_first_object > 1) and (total_contact_second_object < 2):
nb_label_first_obj, nb_label_second_obj, total_contact_first_obj, total_contact_second_obj = track_object_connected_component_single(cx_arr, cy_arr, cz_arr, r1, t1, label_im, nb_label_first_object, nb_label_second_object, total_contact_first_object, total_contact_second_object)
global cop_global_first_obj, cop_global_second_obj
if nb_label_first_obj > 0:
cop_local_first_obj = compute_center_of_pressure(cx_arr,cy_arr,cz_arr,label_im,nb_label_first_obj)
cop_global_first_obj = r1*(cop_local_first_obj.T) + t1
max_force_first_obj = compute_max_force(force_arr,label_im,nb_label_first_obj)
if nb_label_second_obj > 0:
cop_local_second_obj = compute_center_of_pressure(cx_arr,cy_arr,cz_arr,label_im,nb_label_second_obj)
cop_global_second_obj = r1*(cop_local_second_obj.T) + t1
max_force_second_obj = compute_max_force(force_arr,label_im,nb_label_second_obj)
global time_varying_data_first_obj
time_varying_data_first_obj.append(max_force_first_obj)
global time_varying_data_second_obj
time_varying_data_second_obj.append(max_force_second_obj)
test_data_first_obj()
test_data_second_obj()
def test_data_first_obj():
global FLAG_Trunk
global FLAG_Trunk_List
global FLAG_Leaf
global FLAG_Leaf_List
# For Testing
global time_varying_data_first_obj
global max_force_first_obj
if (max_force_first_obj > 0):
ts_obj = time_varying_data_first_obj
final_ts_obj = ghmm.EmissionSequence(F,ts_obj)
# Find Viterbi Path
global model_ff
global model_tf
path_ff_obj = model_ff.viterbi(final_ts_obj)
path_tf_obj = model_tf.viterbi(final_ts_obj)
#print path_ff_obj[1], path_tf_obj[1]
diff_ff = abs(path_ff_obj[1]-path_tf_obj[1])
diff_tf = abs(path_tf_obj[1]-path_ff_obj[1])
obj = max(path_ff_obj[1],path_tf_obj[1])
obj_min = min(abs(path_ff_obj[1]),abs(path_tf_obj[1]))
#if ((obj == path_ff_obj[1]) and (diff_ff > 500)):
if ((obj == path_ff_obj[1]) and (obj_min > 1800)):
print 'Foliage :'
FLAG_Trunk = False
FLAG_Leaf = True
elif ((obj == path_tf_obj[1]) and (obj_min > 1800)):
print 'Trunk :'
FLAG_Trunk = True
FLAG_Leaf = False
else:
print 'Unknown'
FLAG_Trunk = False
FLAG_Leaf = False
FLAG_Trunk_List.append(FLAG_Trunk)
FLAG_Leaf_List.append(FLAG_Leaf)
else:
print 'Unknown'
FLAG_Trunk = False
FLAG_Leaf = False
time_varying_data_first_obj = [0]
def test_data_second_obj():
global FLAG_Trunk
global FLAG_Trunk_List
global FLAG_Leaf
global FLAG_Leaf_List
FLAG_Trunk = False
FLAG_Leaf = False
FLAG_Trunk_List = [False]
FLAG_Leaf_List = [False]
# For Testing
global time_varying_data_second_obj
global max_force_second_obj
if (max_force_second_obj > 0):
ts_obj = time_varying_data_second_obj
final_ts_obj = ghmm.EmissionSequence(F,ts_obj)
# Find Viterbi Path
global model_ff
global model_tf
path_ff_obj = model_ff.viterbi(final_ts_obj)
path_tf_obj = model_tf.viterbi(final_ts_obj)
#print path_ff_obj[1], path_tf_obj[1]
diff_ff = abs(path_ff_obj[1]-path_tf_obj[1])
diff_tf = abs(path_tf_obj[1]-path_ff_obj[1])
obj = max(path_ff_obj[1],path_tf_obj[1])
obj_min = min(abs(path_ff_obj[1]),abs(path_tf_obj[1]))
#if ((obj == path_ff_obj[1]) and (diff_ff > 500)):
if ((obj == path_ff_obj[1]) and (obj_min > 1800)):
print 'Foliage :'
FLAG_Trunk = False
FLAG_Leaf = True
elif ((obj == path_tf_obj[1]) and (obj_min > 1800)):
print 'Trunk :'
FLAG_Trunk = True
FLAG_Leaf = False
else:
print 'Unknown'
FLAG_Trunk = False
FLAG_Leaf = False
FLAG_Trunk_List.append(FLAG_Trunk)
FLAG_Leaf_List.append(FLAG_Leaf)
else:
print 'Unknown'
FLAG_Trunk = False
FLAG_Leaf = False
time_varying_data_second_obj = [0]
def getdata():
rospy.init_node('time_varying_data_two_objects', anonymous=True)
tf_lstnr = tf.TransformListener()
rospy.Subscriber("/skin_patch_forearm_right/taxels/forces", TaxelArray_Meka, callback, callback_args = (tf_lstnr))
rospy.spin()
def getpics():
global FLAG_Trunk_List
global FLAG_Leaf_List
## Need to figure out a way to visualize multiple object classification ####
if __name__ == '__main__':
Fmat = Fmat_original
Foliage_Trials = 5
Trunk_Trials = 5
# Getting mean / covariance
i = 0
number_states = 10
feature_1_final_data = [0.0]*number_states
state_1 = [0.0]
while (i < Foliage_Trials):
data_length = len(Fmat[i])
feature_length = data_length/1
sample_length = feature_length/number_states
Feature_1 = Fmat[i][0:feature_length]
if i == 0:
j = 0
while (j < number_states):
feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)]
j=j+1
else:
j = 0
while (j < number_states):
state_1 = Feature_1[sample_length*j:sample_length*(j+1)]
#print np.shape(state_1)
#print np.shape(feature_1_final_data[j])
feature_1_final_data[j] = feature_1_final_data[j]+state_1
j=j+1
i = i+1
j = 0
mu_ff_force = np.zeros((number_states,1))
sigma_ff = np.zeros((number_states,1))
while (j < number_states):
mu_ff_force[j] = np.mean(feature_1_final_data[j])
sigma_ff[j] = scp.std(feature_1_final_data[j])
j = j+1
i = Foliage_Trials
feature_1_final_data = [0.0]*number_states
state_1 = [0.0]
while (i < (Foliage_Trials + Trunk_Trials)):
data_length = len(Fmat[i])
feature_length = data_length/1
sample_length = feature_length/number_states
Feature_1 = Fmat[i][0:feature_length]
if i == Foliage_Trials:
j = 0
while (j < number_states):
feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)]
j=j+1
else:
j = 0
while (j < number_states):
state_1 = Feature_1[sample_length*j:sample_length*(j+1)]
feature_1_final_data[j] = feature_1_final_data[j]+state_1
j=j+1
i = i+1
j = 0
mu_tf_force = np.zeros((number_states,1))
sigma_tf = np.zeros((number_states,1))
while (j < number_states):
mu_tf_force[j] = np.mean(feature_1_final_data[j])
sigma_tf[j] = scp.std(feature_1_final_data[j])
j = j+1
    # HMM - Implementation:
    # 10 hidden states
    # Max. force (for now), contact area (not now), and contact motion (not
    # now) as continuous Gaussian observations from each hidden state
    # Four HMM models for Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable
    # Transition probabilities form an upper-triangular matrix (to be trained
    # using Baum-Welch)
    # New objects are classified according to which model they represent most
    # closely.
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
if number_states == 3:
A = [[0.2, 0.5, 0.3],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0]]
elif number_states == 5:
A = [[0.2, 0.35, 0.2, 0.15, 0.1],
[0.0, 0.2, 0.45, 0.25, 0.1],
[0.0, 0.0, 0.2, 0.55, 0.25],
[0.0, 0.0, 0.0, 0.2, 0.8],
[0.0, 0.0, 0.0, 0.0, 1.0]]
elif number_states == 10:
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
elif number_states == 15:
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.15, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.10, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.15, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.20, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.40, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 1.00]]
elif number_states == 20:
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.09, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.15, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.10, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.10, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.40, 0.20, 0.10, 0.04, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.03, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.40, 0.20, 0.10, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.20, 0.40, 0.20, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.30, 0.50, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.40, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_ff = [0.0]*number_states
B_tf = [0.0]*number_states
for num_states in range(number_states):
B_ff[num_states] = [mu_ff_force[num_states][0],sigma_ff[num_states][0]]
B_tf[num_states] = [mu_tf_force[num_states][0],sigma_tf[num_states][0]]
# pi - initial probabilities per state
if number_states == 3:
pi = [1./3.] * 3
elif number_states == 5:
pi = [0.2] * 5
elif number_states == 10:
pi = [0.1] * 10
elif number_states == 15:
pi = [1./15.] * 15
elif number_states == 20:
pi = [0.05] * 20
# generate FF, TF models from parameters
model_ff = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_ff, pi) # Will be Trained
model_tf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_tf, pi) # Will be Trained
total_seq = Fmat
for i in range((Foliage_Trials + Trunk_Trials)):
total_seq[i][:] = sum(total_seq[i][:],[])
# For Training
total_seq_ff = total_seq[0:Foliage_Trials]
total_seq_tf = total_seq[Foliage_Trials:(Foliage_Trials + Trunk_Trials)]
train_seq_ff = (np.array(total_seq_ff).T).tolist()
train_seq_tf = (np.array(total_seq_tf).T).tolist()
final_ts_ff = ghmm.SequenceSet(F,train_seq_ff)
final_ts_tf = ghmm.SequenceSet(F,train_seq_tf)
model_ff.baumWelch(final_ts_ff)
model_tf.baumWelch(final_ts_tf)
# Gather Data from Robot Online
index = 0
frame = 0
max_force_first_obj = 0
max_force_second_obj = 0
cop_global_first_obj = [0,0,0]
cop_global_second_obj = [0,0,0]
cop_global_first_obj_prev = [0,0,0]
cop_global_second_obj_prev = [0,0,0]
format = 'png'
fmags = np.zeros(384)
iterindex = 0
time_varying_data_first_obj = [0]
time_varying_data_second_obj = [0]
FLAG_Trunk = False
FLAG_Leaf = False
FLAG_Trunk_List = [False]
FLAG_Leaf_List = [False]
getdata()
getpics()
|
tapomayukh/projects_in_python
|
rapid_categorization/variable_length_training_data/hmm_force_two_objects.py
|
Python
|
mit
| 26,303
|
[
"Gaussian",
"Mayavi"
] |
eec2ca03434cbad3cc9cce8f0b290ed93df99823b08debbd066d522311ae2032
|
#!/usr/bin/env python
# $Id: Compiler.py,v 1.69 2005/07/10 20:32:06 tavis_rudd Exp $
"""Compiler classes for Cheetah:
ModuleCompiler aka 'Compiler'
ClassCompiler
MethodCompiler
If you are trying to grok this code start with ModuleCompiler.__init__,
ModuleCompiler.compile, and ModuleCompiler.__getattr__.
Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com>
Version: $Revision: 1.69 $
Start Date: 2001/09/19
Last Revision Date: $Date: 2005/07/10 20:32:06 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>"
__revision__ = "$Revision: 1.69 $"[11:-2]
import sys
import os
import os.path
from os.path import getmtime, exists
import re
import types
import time
import random
import warnings
from Cheetah.Version import Version
from Cheetah.SettingsManager import SettingsManager
from Cheetah.Parser import Parser, ParseError, specialVarRE, STATIC_CACHE, REFRESH_CACHE
from Cheetah.Utils.Indenter import indentize # an undocumented preprocessor
from Cheetah import ErrorCatchers
class Error(Exception):
pass
class GenUtils:
"""An abstract baseclass for the Compiler classes that provides methods that
perform generic utility functions or generate pieces of output code from
information passed in by the Parser baseclass. These methods don't do any
parsing themselves."""
def genTimeInterval(self, timeString):
##@@ TR: need to add some error handling here
if timeString[-1] == 's':
interval = float(timeString[:-1])
elif timeString[-1] == 'm':
interval = float(timeString[:-1])*60
elif timeString[-1] == 'h':
interval = float(timeString[:-1])*60*60
elif timeString[-1] == 'd':
interval = float(timeString[:-1])*60*60*24
elif timeString[-1] == 'w':
interval = float(timeString[:-1])*60*60*24*7
else: # default to minutes
interval = float(timeString)*60
return interval
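    # Sketch of the interval parsing above (inputs hypothetical):
    #     genTimeInterval('30s')  -> 30.0
    #     genTimeInterval('2h')   -> 7200.0
    #     genTimeInterval('5')    -> 300.0   # bare numbers default to minutes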
def genCacheInfo(self, cacheToken):
"""Decipher a placeholder cachetoken
"""
match = self._parser.cacheTokenRE.match(cacheToken)
subGrpDict = match.groupdict()
cacheInfo = {}
if subGrpDict['REFRESH_CACHE']:
cacheInfo['type'] = REFRESH_CACHE
cacheInfo['interval'] = self.genTimeInterval(subGrpDict['interval'])
elif subGrpDict['STATIC_CACHE']:
cacheInfo['type'] = STATIC_CACHE
return cacheInfo # is empty if no cache
def genCacheInfoFromArgList(self, argList):
cacheInfo = {'type':REFRESH_CACHE}
for key, val in argList:
if val[0] in '"\'':
val = val[1:-1]
if key == 'timer':
key = 'interval'
val = self.genTimeInterval(val)
cacheInfo[key] = val
return cacheInfo
def genCheetahVar(self, nameChunks, plain=False):
if nameChunks[0][0] in self.setting('gettextTokens'):
self.addGetTextVar(nameChunks)
if self.setting('useNameMapper') and not plain:
return self.genNameMapperVar(nameChunks)
else:
return self.genPlainVar(nameChunks)
def addGetTextVar(self, nameChunks):
"""Output something that gettext can recognize.
This is a harmless side effect necessary to make gettext work when it
is scanning compiled templates for strings marked for translation.
"""
# @@TR: this should be in the compiler not here
self.addChunk("if False:")
self.indent()
self.addChunk(self.genPlainVar(nameChunks[:]))
self.dedent()
def genPlainVar(self, nameChunks):
"""Generate Python code for a Cheetah $var without using NameMapper
(Unified Dotted Notation with the SearchList)."""
nameChunks.reverse()
chunk = nameChunks.pop()
pythonCode = chunk[0] + chunk[2]
while nameChunks:
chunk = nameChunks.pop()
pythonCode = (pythonCode + '.' + chunk[0] + chunk[2])
return pythonCode
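    # Sketch: for nameChunks [('a.b', True, ''), ('c', False, '()')] the code
    # above yields 'a.b.c()' -- chunks joined with dots, each followed by its
    # remainder expression.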
def genNameMapperVar(self, nameChunks):
"""Generate valid Python code for a Cheetah $var, using NameMapper
(Unified Dotted Notation with the SearchList).
nameChunks = list of var subcomponents represented as tuples
[ (name,useAC,remainderOfExpr),
]
where:
name = the dotted name base
        useAC = whether NameMapper should use autocalling on namemapperPart
remainderOfExpr = any arglist, index, or slice
If remainderOfExpr contains a call arglist (e.g. '(1234)') then useAC
is False, otherwise it defaults to True. It is overridden by the global
setting 'useAutocalling' if this setting is False.
EXAMPLE
------------------------------------------------------------------------
if the raw Cheetah Var is
$a.b.c[1].d().x.y.z
nameChunks is the list
[ ('a.b.c',True,'[1]'), # A
('d',False,'()'), # B
('x.y.z',True,''), # C
]
When this method is fed the list above it returns
VFN(VFN(VFFSL(SL, 'a.b.c',True)[1], 'd',False)(), 'x.y.z',True)
which can be represented as
VFN(B`, name=C[0], executeCallables=(useAC and C[1]))C[2]
where:
VFN = NameMapper.valueForName
VFFSL = NameMapper.valueFromFrameOrSearchList
SL = self.searchList()
useAC = self.setting('useAutocalling') # True in this example
A = ('a.b.c',True,'[1]')
B = ('d',False,'()')
C = ('x.y.z',True,'')
C` = VFN( VFN( VFFSL(SL, 'a.b.c',True)[1],
'd',False)(),
'x.y.z',True)
= VFN(B`, name='x.y.z', executeCallables=True)
B` = VFN(A`, name=B[0], executeCallables=(useAC and B[1]))B[2]
A` = VFFSL(SL, name=A[0], executeCallables=(useAC and A[1]))A[2]
"""
defaultUseAC = self.setting('useAutocalling')
nameChunks.reverse()
name, useAC, remainder = nameChunks.pop()
pythonCode = ('VFFSL(SL,'
'"'+ name + '",'
+ repr(defaultUseAC and useAC) + ')'
+ remainder)
while nameChunks:
name, useAC, remainder = nameChunks.pop()
pythonCode = ('VFN(' + pythonCode +
',"' + name +
'",' + repr(defaultUseAC and useAC) + ')'
+ remainder)
return pythonCode
##################################################
## METHOD COMPILERS
class MethodCompiler(GenUtils):
def __init__(self, methodName, classCompiler, templateObj=None):
self._settingsManager = classCompiler
self._templateObj = templateObj
self._methodName = methodName
self._setupState()
def setting(self, key):
return self._settingsManager.setting(key)
def _setupState(self):
self._indent = self.setting('indentationStep')
self._indentLev = self.setting('initialMethIndentLevel')
self._pendingStrConstChunks = []
self._methodSignature = None
self._methodDef = None
self._docStringLines = []
self._methodBodyChunks = []
self._cacheRegionOpen = False
def cleanupState(self):
"""Called by the containing class compiler instance"""
pass
def methodName(self):
return self._methodName
def setMethodName(self, name):
self._methodName = name
## methods for managing indentation
def indentation(self):
return self._indent * self._indentLev
def indent(self):
self._indentLev +=1
def dedent(self):
if self._indentLev:
self._indentLev -=1
else:
raise Error('Attempt to dedent when the indentLev is 0')
## methods for final code wrapping
def methodDef(self):
if self._methodDef:
return self._methodDef
else:
return self.wrapCode()
__str__ = methodDef
def wrapCode(self):
self.commitStrConst()
methodDefChunks = (
self.methodSignature(),
'\n',
self.docString(),
self.methodBody(),
)
methodDef = ''.join(methodDefChunks)
self._methodDef = methodDef
return methodDef
def methodSignature(self):
return self._indent + self._methodSignature + ':'
def setMethodSignature(self, signature):
self._methodSignature = signature
def methodBody(self):
return ''.join( self._methodBodyChunks )
def docString(self):
ind = self._indent*2
docStr = (ind + '"""\n' + ind +
('\n' + ind).join(self._docStringLines) +
'\n' + ind + '"""\n')
return docStr
## methods for adding code
def addMethDocString(self, line):
self._docStringLines.append(line.replace('%','%%'))
def addChunk(self, chunk):
self.commitStrConst()
chunk = "\n" + self.indentation() + chunk
self._methodBodyChunks.append(chunk)
def appendToPrevChunk(self, appendage):
self._methodBodyChunks[-1] = self._methodBodyChunks[-1] + appendage
def addWriteChunk(self, chunk):
self.addChunk('write(' + chunk + ')')
def addFilteredChunk(self, chunk, rawExpr=None):
"""
"""
self.addWriteChunk('filter(' + chunk + ', rawExpr=' + repr(rawExpr) +')')
# @@TR: consider merging the next two methods into one
def addStrConst(self, strConst):
self._appendToPrevStrConst(strConst)
def addRawText(self, text):
self.addStrConst(text)
def _appendToPrevStrConst(self, strConst):
if self._pendingStrConstChunks:
self._pendingStrConstChunks.append(strConst)
else:
self._pendingStrConstChunks = [strConst]
def _unescapeCheetahVars(self, theString):
"""Unescape any escaped Cheetah \$vars in the string."""
token = self.setting('cheetahVarStartToken')
return theString.replace('\\' + token, token)
def _unescapeDirectives(self, theString):
"""Unescape any escaped Cheetah \$vars in the string."""
token = self.setting('directiveStartToken')
return theString.replace('\\' + token, token)
def commitStrConst(self):
"""Add the code for outputting the pending strConst without chopping off
any whitespace from it.
"""
if self._pendingStrConstChunks:
strConst = self._unescapeCheetahVars(''.join(self._pendingStrConstChunks))
strConst = self._unescapeDirectives(strConst)
self._pendingStrConstChunks = []
if self.setting('reprShortStrConstants') and \
strConst.count('\n') < self.setting('reprNewlineThreshold'):
self.addWriteChunk( repr(strConst).replace('\\012','\\n'))
else:
strConst = strConst.replace('\\','\\\\').replace("'''","'\'\'\'")
if strConst[0] == "'":
strConst = '\\' + strConst
if strConst[-1] == "'":
strConst = strConst[:-1] + '\\' + strConst[-1]
self.addWriteChunk("'''" + strConst + "'''" )
def handleWSBeforeDirective(self):
"""Truncate the pending strCont to the beginning of the current line.
"""
if self._pendingStrConstChunks:
src = self._pendingStrConstChunks[-1]
BOL = max(src.rfind('\n')+1, src.rfind('\r')+1, 0)
if BOL < len(src):
self._pendingStrConstChunks[-1] = src[:BOL]
def addMethComment(self, comm):
offSet = self.setting('commentOffset')
self.addChunk('#' + ' '*offSet + comm)
def addSilent(self, expr):
self.addChunk( expr )
def addSet(self, LVALUE, OP, RVALUE, isGlobal=True):
## we need to split the LVALUE to deal with globalSetVars
splitPos1 = LVALUE.find('.')
splitPos2 = LVALUE.find('[')
if splitPos1 > 0 and splitPos2==-1:
splitPos = splitPos1
elif splitPos1 > 0 and splitPos1 < max(splitPos2,0):
splitPos = splitPos1
else:
splitPos = splitPos2
if splitPos >0:
primary = LVALUE[:splitPos]
secondary = LVALUE[splitPos:]
else:
primary = LVALUE
secondary = ''
if isGlobal:
LVALUE = 'globalSetVars["' + primary + '"]' + secondary
else:
pass
self.addChunk( LVALUE + ' ' + OP + ' ' + RVALUE.strip() )
def addInclude(self, sourceExpr, includeFrom, isRaw):
# @@TR: consider soft-coding this
self.addWriteChunk('self._includeCheetahSource(' + sourceExpr +
', trans=trans, ' +
'includeFrom="' + includeFrom + '", raw=' +
repr(isRaw) + ')')
def addWhile(self, expr):
self.addIndentingDirective(expr)
def addFor(self, expr):
self.addIndentingDirective(expr)
def addRepeat(self, expr):
#the _repeatCount stuff here allows nesting of #repeat directives
self._repeatCount = getattr(self, "_repeatCount", -1) + 1
self.addFor('for __i%s in range(%s)' % (self._repeatCount,expr))
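    # Sketch: '#repeat 3' compiles to 'for __i0 in range(3):'; nested #repeat
    # directives bump _repeatCount so inner loops use __i1, __i2, ...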
def addIndentingDirective(self, expr):
if expr and not expr[-1] == ':':
expr = expr + ':'
self.addChunk( expr )
self.indent()
def addReIndentingDirective(self, expr):
self.commitStrConst()
self.dedent()
if not expr[-1] == ':':
expr = expr + ':'
self.addChunk( expr )
self.indent()
def addIf(self, expr):
"""For a full #if ... #end if directive
"""
self.addIndentingDirective(expr)
def addOneLineIf(self, conditionExpr, trueExpr, falseExpr):
"""For a single-lie #if ... then .... else ... directive
<condition> then <trueExpr> else <falseExpr>
"""
self.addIndentingDirective(conditionExpr)
self.addFilteredChunk(trueExpr)
self.dedent()
self.addIndentingDirective('else')
self.addFilteredChunk(falseExpr)
self.dedent()
def addElse(self, expr):
expr = re.sub(r'else[ \f\t]+if','elif', expr)
self.addReIndentingDirective(expr)
def addUnless(self, expr):
self.addIf('if not (' + expr + ')')
def addTry(self, expr):
self.addIndentingDirective(expr)
def addExcept(self, expr):
self.addReIndentingDirective(expr)
def addFinally(self, expr):
self.addReIndentingDirective(expr)
def addReturn(self, expr):
self.addChunk(expr)
def addPSP(self, PSP):
self.commitStrConst()
autoIndent = False
if PSP[0] == '=':
PSP = PSP[1:]
if PSP:
self.addWriteChunk('filter(' + PSP + ')')
return
elif PSP.lower() == 'end':
self.dedent()
return
elif PSP[-1] == '$':
autoIndent = True
PSP = PSP[:-1]
elif PSP[-1] == ':':
autoIndent = True
for line in PSP.splitlines():
self.addChunk(line)
if autoIndent:
self.indent()
def nextCacheID(self):
return str(random.randrange(100, 999)) \
+ str(random.randrange(10000, 99999))
def startCacheRegion(self, cacheInfo, lineCol):
ID = self.nextCacheID()
interval = cacheInfo.get('interval',None)
test = cacheInfo.get('test',None)
customID = cacheInfo.get('id',None)
if customID:
ID = repr(customID)
varyBy = cacheInfo.get('varyBy',ID)
self._cacheRegionOpen = True # attrib of current methodCompiler
self.addChunk('## START CACHE REGION: at line, col ' + str(lineCol) + ' in the source.')
self.addChunk('RECACHE = False')
self.addChunk('region = self._cacheRegions.get(' + ID + ')')
self.addChunk('if not region:')
self.indent()
self.addChunk("region = CacheRegion()")
self.addChunk("self._cacheRegions[" + ID + "] = region")
self.addChunk('RECACHE = True')
self.dedent()
self.addChunk('cache = region.getCache('+varyBy+')')
if interval:
self.addMethDocString('This cache will be refreshed every ' +
str(interval) + ' seconds.')
self.addChunk('if (not cache.getRefreshTime())' +
' or (currentTime() > cache.getRefreshTime()):')
self.indent()
self.addChunk("cache.setRefreshTime(currentTime() +" + str(interval) + ")")
self.addChunk('RECACHE = True')
self.dedent()
if test:
self.addChunk('if ' + test + ':')
self.indent()
self.addChunk('RECACHE = True')
self.dedent()
self.addChunk('if RECACHE or not cache.getData():')
self.indent()
self.addChunk('orig_trans = trans')
self.addChunk('trans = cacheCollector = DummyTransaction()')
self.addChunk('write = cacheCollector.response().write')
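# Illustration (not part of the original source): the chunks emitted above
# compile to generated code of roughly this shape (ID and varyBy filled in
# from the #cache directive):
#   RECACHE = False
#   region = self._cacheRegions.get(<ID>)
#   if not region:
#       region = CacheRegion()
#       self._cacheRegions[<ID>] = region
#       RECACHE = True
#   cache = region.getCache(<varyBy>)
#   if RECACHE or not cache.getData():
#       orig_trans = trans
#       trans = cacheCollector = DummyTransaction()
#       write = cacheCollector.response().write
# endCacheRegion() below then restores trans and write, stores the captured
# output via cache.setData(), and writes cache.getData() to the real
# transaction.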
def endCacheRegion(self):
self._cacheRegionOpen = False
self.addChunk('trans = orig_trans')
self.addChunk('write = trans.response().write')
self.addChunk('cache.setData(cacheCollector.response().getvalue())')
self.addChunk('del cacheCollector')
self.dedent()
self.addWriteChunk('cache.getData()')
self.addChunk('## END CACHE REGION')
self.addChunk('')
def setErrorCatcher(self, errorCatcherName):
if self._templateObj:
self._templateObj._errorCatcher = \
getattr(ErrorCatchers, errorCatcherName)(self._templateObj)
self.addChunk('if self._errorCatchers.has_key("' + errorCatcherName + '"):')
self.indent()
self.addChunk('self._errorCatcher = self._errorCatchers["' +
errorCatcherName + '"]')
self.dedent()
self.addChunk('else:')
self.indent()
self.addChunk('self._errorCatcher = self._errorCatchers["'
+ errorCatcherName + '"] = ErrorCatchers.'
+ errorCatcherName + '(self)'
)
self.dedent()
def setFilter(self, theFilter, isKlass):
if isKlass:
self.addChunk('filter = self._currentFilter = ' + theFilter.strip() +
'(self).filter')
else:
if theFilter.lower() == 'none':
self.addChunk('filter = self._initialFilter')
else:
# is string representing the name of a builtin filter
self.addChunk('filterName = ' + repr(theFilter))
self.addChunk('if self._filters.has_key("' + theFilter + '"):')
self.indent()
self.addChunk('filter = self._currentFilter = self._filters[filterName]')
self.dedent()
self.addChunk('else:')
self.indent()
self.addChunk('filter = self._currentFilter = \\\n\t\t\tself._filters[filterName] = '
+ 'getattr(self._filtersLib, filterName)(self).filter')
self.dedent()
class AutoMethodCompiler(MethodCompiler):
def _setupState(self):
MethodCompiler._setupState(self)
self._argStringList = [ ("self",None) ]
self._streamingEnabled = True
def cleanupState(self):
MethodCompiler.cleanupState(self)
self.commitStrConst()
if self._cacheRegionOpen:
self.endCacheRegion()
self._indentLev = self.setting('initialMethIndentLevel')
mainBodyChunks = self._methodBodyChunks
self._methodBodyChunks = []
self._addAutoSetupCode()
self._methodBodyChunks.extend(mainBodyChunks)
self._addAutoCleanupCode()
if self._streamingEnabled:
for argName, defVal in [ ('trans', 'None'),
("dummyTrans","False"),
("VFFSL","valueFromFrameOrSearchList"),
("VFN","valueForName"),
("getmtime","getmtime"),
("currentTime","time.time"),
]:
self.addMethArg(argName, defVal)
def _addAutoSetupCode(self):
if self._streamingEnabled:
self.addChunk('if not trans:')
self.indent()
self.addChunk('trans = DummyTransaction()')
self.addChunk('dummyTrans = True')
self.dedent()
else:
self.addChunk('trans = DummyTransaction()')
self.addChunk('dummyTrans = True')
self.addChunk('write = trans.response().write')
self.addChunk('SL = self._searchList')
self.addChunk('filter = self._currentFilter')
self.addChunk('globalSetVars = self._globalSetVars')
self.addChunk('')
self.addChunk("#" *40)
self.addChunk('## START - generated method body')
self.addChunk('')
def _addAutoCleanupCode(self):
self.addChunk('')
self.addChunk("#" *40)
self.addChunk('## END - generated method body')
self.addChunk('')
self.addStop()
self.addChunk('')
def addStop(self, expr=None):
self.addChunk('if dummyTrans:')
self.indent()
self.addChunk('return trans.response().getvalue()')
self.dedent()
self.addChunk('else:')
self.indent()
self.addChunk('return ""')
self.dedent()
def addMethArg(self, name, defVal=None):
if '*' in name:
self._streamingEnabled = False
self._argStringList.append( (name,defVal) )
def methodSignature(self):
argStringChunks = []
for arg in self._argStringList:
chunk = arg[0]
if arg[1] is not None:
chunk += '=' + arg[1]
argStringChunks.append(chunk)
return (self._indent + "def " + self.methodName() + "(" +
(',\n' + self._indent*3).join(argStringChunks) + "):\n\n")
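# Illustration (not part of the original source): with the streaming args
# added in cleanupState() above, the generated signature looks like
#   def respond(self,
#           trans=None,
#           dummyTrans=False,
#           VFFSL=valueFromFrameOrSearchList,
#           VFN=valueForName,
#           getmtime=getmtime,
#           currentTime=time.time):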
##################################################
## CLASS COMPILERS
class ClassCompiler(GenUtils):
methodCompilerClass = AutoMethodCompiler
methodCompilerClassForInit = MethodCompiler
def __init__(self, className, mainMethodName='respond',
templateObj=None,
fileName=None,
settingsManager=None):
self._settingsManager = settingsManager
self._fileName = fileName
self._className = className
self._mainMethodName = mainMethodName
self._templateObj = templateObj
self._setupState()
methodCompiler = self._spawnMethodCompiler(mainMethodName)
methodCompiler.addMethDocString('This is the main method generated by Cheetah')
self._setActiveMethodCompiler(methodCompiler)
if fileName and self.setting('monitorSrcFile'):
self._addSourceFileMonitoring(fileName)
def setting(self, key):
return self._settingsManager.setting(key)
def __getattr__(self, name):
"""Provide access to the methods and attributes of the MethodCompiler
at the top of the activeMethods stack: one-way namespace sharing
WARNING: Use .setMethods to assign the attributes of the MethodCompiler
from the methods of this class!!! or you will be assigning to attributes
of this object instead."""
if self.__dict__.has_key(name):
return self.__dict__[name]
elif hasattr(self.__class__, name):
return getattr(self.__class__, name)
elif self._activeMethodsList and hasattr(self._activeMethodsList[-1], name):
return getattr(self._activeMethodsList[-1], name)
else:
raise AttributeError, name
def _setupState(self):
self._classDef = None
self._activeMethodsList = [] # stack while parsing/generating
self._finishedMethodsList = [] # store by order
self._methodsIndex = {} # store by name
self._baseClass = 'Template'
self._classDocStringLines = []
self._generatedAttribs = [] # printed after methods in the gen class def
self._initMethChunks = []
self._alias__str__ = True # should we set the __str__ alias
self._blockMetaData = {}
self._errorCatcherCount = 0
self._placeholderToErrorCatcherMap = {}
def cleanupState(self):
while self._activeMethodsList:
methCompiler = self._popActiveMethodCompiler()
self._swallowMethodCompiler(methCompiler)
self._setupInitMethod()
if self._mainMethodName == 'respond':
self._generatedAttribs.append('__str__ = respond')
if self._templateObj:
self._templateObj.__str__ = self._templateObj.respond
self.addAttribute('_mainCheetahMethod_for_' + self._className +
'= ' + repr(self._mainMethodName)
)
def _setupInitMethod(self):
__init__ = self._spawnMethodCompiler('__init__',
klass=self.methodCompilerClassForInit)
__init__.setMethodSignature("def __init__(self, *args, **KWs)")
__init__.addChunk("%s.__init__(self, *args, **KWs)" % self._baseClass)
for chunk in self._initMethChunks:
__init__.addChunk(chunk)
__init__.cleanupState()
self._swallowMethodCompiler(__init__, pos=0)
def _addSourceFileMonitoring(self, fileName):
# the first bit is added to init
self.addChunkToInit('self._filePath = ' + repr(fileName))
self.addChunkToInit('self._fileMtime = ' + str(getmtime(fileName)) )
if self._templateObj:
setattr(self._templateObj, '_filePath', fileName)
setattr(self._templateObj, '_fileMtime', getmtime(fileName))
# the rest is added to the main output method of the class ('mainMethod')
self.addChunk('if exists(self._filePath) and ' +
'getmtime(self._filePath) > self._fileMtime:')
self.indent()
self.addChunk('self.compile(file=self._filePath, moduleName='
+ repr(self._className) + ')')
self.addChunk(
'write(getattr(self, self._mainCheetahMethod_for_' + self._className +
')(trans=trans))')
self.addStop()
self.dedent()
def setClassName(self, name):
self._className = name
def className(self):
return self._className
def setBaseClass(self, baseClassName):
self._baseClass = baseClassName
def setMainMethodName(self, methodName):
## change the name in the methodCompiler and add new reference
mainMethod = self._methodsIndex[self._mainMethodName]
mainMethod.setMethodName(methodName)
self._methodsIndex[methodName] = mainMethod
## make sure that fileUpdate code still works properly:
chunkToChange = ('write(self.' + self._mainMethodName + '(trans=trans))')
chunks = mainMethod._methodBodyChunks
if chunkToChange in chunks:
for i in range(len(chunks)):
if chunks[i] == chunkToChange:
chunks[i] = ('write(self.' + methodName + '(trans=trans))')
## get rid of the old reference and update self._mainMethodName
del self._methodsIndex[self._mainMethodName]
self._mainMethodName = methodName
def _spawnMethodCompiler(self, methodName, klass=None):
if klass is None:
klass = self.methodCompilerClass
methodCompiler = klass(methodName, classCompiler=self, templateObj=self._templateObj)
self._methodsIndex[methodName] = methodCompiler
return methodCompiler
def _setActiveMethodCompiler(self, methodCompiler):
self._activeMethodsList.append(methodCompiler)
def _getActiveMethodCompiler(self):
return self._activeMethodsList[-1]
def _popActiveMethodCompiler(self):
return self._activeMethodsList.pop()
def _swallowMethodCompiler(self, methodCompiler, pos=None):
methodCompiler.cleanupState()
if pos is None:
self._finishedMethodsList.append( methodCompiler )
else:
self._finishedMethodsList.insert(pos, methodCompiler)
if self._templateObj and methodCompiler.methodName() != '__init__':
self._templateObj._bindCompiledMethod(methodCompiler)
return methodCompiler
def startMethodDef(self, methodName, argsList, parserComment):
methodCompiler = self._spawnMethodCompiler(methodName)
self._setActiveMethodCompiler(methodCompiler)
## deal with the method's argstring
for argName, defVal in argsList:
methodCompiler.addMethArg(argName, defVal)
methodCompiler.addMethDocString(parserComment)
def _finishedMethods(self):
return self._finishedMethodsList
def addClassDocString(self, line):
self._classDocStringLines.append( line.replace('%','%%'))
def addChunkToInit(self,chunk):
self._initMethChunks.append(chunk)
def addAttribute(self, attribExpr):
## first test to make sure that the user hasn't used any fancy Cheetah syntax
# (placeholders, directives, etc.) inside the expression
if attribExpr.find('VFN(') != -1 or attribExpr.find('VFFSL(') != -1:
raise ParseError(self,
'Invalid #attr directive.' +
' It should only contain simple Python literals.')
## now add the attribute
self._generatedAttribs.append(attribExpr)
if self._templateObj:
exec('self._templateObj.' + attribExpr.strip())
def addSettingsToInit(self, settingsStr, settingsType='ini'):
#@@TR 2005-01-01: this may not be used anymore?
if settingsType=='python':
reader = 'updateSettingsFromPySrcStr'
else:
reader = 'updateSettingsFromConfigStr'
settingsCode = ("self." + reader + "('''" +
settingsStr.replace("'''","\'\'\'") +
"''')")
self.addChunkToInit(settingsCode)
def addErrorCatcherCall(self, codeChunk, rawCode='', lineCol=''):
if self._placeholderToErrorCatcherMap.has_key(rawCode):
methodName = self._placeholderToErrorCatcherMap[rawCode]
if not self.setting('outputRowColComments'):
self._methodsIndex[methodName].addMethDocString(
'plus at line, col ' + str(lineCol))
return methodName
self._errorCatcherCount += 1
methodName = '__errorCatcher' + str(self._errorCatcherCount)
self._placeholderToErrorCatcherMap[rawCode] = methodName
catcherMeth = self._spawnMethodCompiler(methodName, klass=MethodCompiler)
catcherMeth.setMethodSignature('def ' + methodName +
'(self, localsDict={})')
# is this use of localsDict right?
catcherMeth.addMethDocString('Generated from ' + rawCode +
' at line, col ' + str(lineCol) + '.')
catcherMeth.addChunk('try:')
catcherMeth.indent()
catcherMeth.addChunk("return eval('''" + codeChunk +
"''', globals(), localsDict)")
catcherMeth.dedent()
catcherMeth.addChunk('except self._errorCatcher.exceptions(), e:')
catcherMeth.indent()
catcherMeth.addChunk("return self._errorCatcher.warn(exc_val=e, code= " +
repr(codeChunk) + " , rawCode= " +
repr(rawCode) + " , lineCol=" + str(lineCol) +")")
catcherMeth.cleanupState()
self._swallowMethodCompiler(catcherMeth)
return methodName
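# Illustration (not part of the original source): each distinct placeholder
# gets one generated catcher method of roughly this shape:
#   def __errorCatcher1(self, localsDict={}):
#       try:
#           return eval('''<codeChunk>''', globals(), localsDict)
#       except self._errorCatcher.exceptions(), e:
#           return self._errorCatcher.warn(exc_val=e, code=..., rawCode=...,
#                                          lineCol=...)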
def closeDef(self):
self.commitStrConst()
methCompiler = self._popActiveMethodCompiler()
self._swallowMethodCompiler(methCompiler)
def closeBlock(self):
self.commitStrConst()
methCompiler = self._popActiveMethodCompiler()
methodName = methCompiler.methodName()
if self.setting('includeBlockMarkers'):
endMarker = self.setting('blockMarkerEnd')
methCompiler.addStrConst(endMarker[0] + methodName + endMarker[1])
self._swallowMethodCompiler(methCompiler)
#metaData = self._blockMetaData[methodName]
#rawDirective = metaData['raw']
#lineCol = metaData['lineCol']
## insert the code to call the block, caching if #cache directive is on
codeChunk = 'self.' + methodName + '(trans=trans)'
self.addChunk(codeChunk)
#self.appendToPrevChunk(' # generated from ' + repr(rawDirective) )
#if self.setting('outputRowColComments'):
# self.appendToPrevChunk(' at line %s, col %s' % lineCol + '.')
## code wrapping methods
def classDef(self):
if self._classDef:
return self._classDef
else:
return self.wrapClassDef()
__str__ = classDef
def wrapClassDef(self):
self.addClassDocString('')
self.addClassDocString(self.setting('defDocStrMsg'))
ind = self.setting('indentationStep')
classDefChunks = (
self.classSignature(),
self.classDocstring(),
ind + '#'*50,
ind + '## GENERATED METHODS',
'\n',
self.methodDefs(),
ind + '#'*50,
ind + '## GENERATED ATTRIBUTES',
'\n',
self.attributes(),
)
classDef = '\n'.join(classDefChunks)
self._classDef = classDef
return classDef
def classSignature(self):
return "class %s(%s):" % (self.className(), self._baseClass)
def classDocstring(self):
ind = self.setting('indentationStep')
docStr = ('%(ind)s"""\n%(ind)s' +
'\n%(ind)s'.join(self._classDocStringLines) +
'\n%(ind)s"""\n'
) % {'ind':ind}
return docStr
def methodDefs(self):
methodDefs = [str(methGen) for methGen in self._finishedMethods() ]
return '\n\n'.join(methodDefs)
def attributes(self):
attribs = [self.setting('indentationStep') + str(attrib)
for attrib in self._generatedAttribs ]
return '\n\n'.join(attribs)
class AutoClassCompiler(ClassCompiler):
pass
##################################################
## MODULE COMPILERS
#class ModuleCompiler(Parser, GenUtils):
class ModuleCompiler(SettingsManager, GenUtils):
parserClass = Parser
classCompilerClass = AutoClassCompiler
def __init__(self, source=None, file=None, moduleName='GenTemplate',
mainClassName=None,
mainMethodName='respond',
templateObj=None,
settings=None):
SettingsManager.__init__(self)
if settings:
self.updateSettings(settings)
self._templateObj = templateObj
self._compiled = False
self._moduleName = moduleName
if not mainClassName:
self._mainClassName = moduleName
else:
self._mainClassName = mainClassName
self._mainMethodName = mainMethodName
self._filePath = None
self._fileMtime = None
if source and file:
raise TypeError("Cannot compile from a source string AND file.")
elif isinstance(file, types.StringType) or isinstance(file, types.UnicodeType): # it's a filename.
f = open(file) # Raises IOError.
source = f.read()
f.close()
self._filePath = file
self._fileMtime = os.path.getmtime(file)
elif hasattr(file, 'read'):
source = file.read() # Can't set filename or mtime--they're not accessible.
elif file:
raise TypeError("'file' argument must be a filename string or file-like object")
if self._filePath:
self._fileDirName, self._fileBaseName = os.path.split(self._filePath)
self._fileBaseNameRoot, self._fileBaseNameExt = \
os.path.splitext(self._fileBaseName)
if not (isinstance(source, str) or isinstance(source, unicode)):
source = str( source )
# by converting to string here we allow objects such as other Templates
# to be passed in
# Handle the #indent directive by converting it to other directives.
# (Over the long term we'll make it a real directive.)
if source == "":
warnings.warn("You supplied an empty string for the source!", )
if source.find('#indent') != -1: #@@TR: undocumented hack
source = indentize(source)
self._parser = self.parserClass(source, filename=self._filePath, compiler=self)
self._setupCompilerState()
def __getattr__(self, name):
"""Provide one-way access to the methods and attributes of the
ClassCompiler, and thereby the MethodCompilers as well.
WARNING: Use .setMethods to assign the attributes of the ClassCompiler
from the methods of this class!!! or you will be assigning to attributes
of this object instead."""
if self.__dict__.has_key(name):
return self.__dict__[name]
elif hasattr(self.__class__, name):
return getattr(self.__class__, name)
elif self._activeClassesList and hasattr(self._activeClassesList[-1], name):
return getattr(self._activeClassesList[-1], name)
else:
raise AttributeError, name
def _initializeSettings(self):
defaults = {
'indentationStep': ' '*4,
'initialMethIndentLevel': 2,
'monitorSrcFile':False,
## controlling the handling of Cheetah $vars
'useNameMapper': True, # Unified dotted notation and the searchList
'useAutocalling': True, # detect and call callable()'s
'useErrorCatcher':False,
## controlling the aesthetic appearance of the generated code
'commentOffset': 1,
# should shorter str constant chunks be printed using repr rather than ''' quotes
'reprShortStrConstants': True,
'reprNewlineThreshold':3,
'outputRowColComments':True,
## should #block's be wrapped in a comment in the template's output
'includeBlockMarkers': False,
'blockMarkerStart':('\n<!-- START BLOCK: ',' -->\n'),
'blockMarkerEnd':('\n<!-- END BLOCK: ',' -->\n'),
'defDocStrMsg':'Autogenerated by CHEETAH: The Python-Powered Template Engine',
'gettextTokens': ["_", "N_", "ngettext"],
## @@TR: The following really belong in the parser, but I've put them
## here for the time being to facilitate separating the parser and
## compiler:
'cheetahVarStartToken':'$',
'commentStartToken':'##',
'multiLineCommentStartToken':'#*',
'multiLineCommentEndToken':'*#',
'directiveStartToken':'#',
'directiveEndToken':'#',
'PSPStartToken':'<%',
'PSPEndToken':'%>',
}
self.updateSettings( defaults )
def _setupCompilerState(self):
self._activeClassesList = []
self._finishedClassesList = [] # stored in order
self._finishedClassIndex = {} # indexed by name
self._moduleDef = None
self._moduleShBang = '#!/usr/bin/env python'
self._moduleEncoding = ''
self._moduleHeaderLines = []
self._moduleDocStringLines = []
self._specialVars = {}
self._importStatements = [
"import sys",
"import os",
"import os.path",
"from os.path import getmtime, exists",
"import time",
"import types",
"import __builtin__",
"from Cheetah.Template import Template",
"from Cheetah.DummyTransaction import DummyTransaction",
"from Cheetah.NameMapper import NotFound, valueForName, valueFromFrameOrSearchList",
"from Cheetah.CacheRegion import CacheRegion",
"import Cheetah.Filters as Filters",
"import Cheetah.ErrorCatchers as ErrorCatchers",
]
self._importedVarNames = ['sys',
'os',
'os.path',
'time',
'types',
'Template',
'DummyTransaction',
'NotFound',
'Filters',
'ErrorCatchers',
'CacheRegion',
]
self._moduleConstants = [
"try:",
" True, False",
"except NameError:",
" True, False = (1==1), (1==0)",
"VFFSL=valueFromFrameOrSearchList",
"VFN=valueForName",
"currentTime=time.time",
]
self._errorCatcherOn = False
def compile(self):
classCompiler = self._spawnClassCompiler(self._mainClassName)
self._addActiveClassCompiler(classCompiler)
self._parser.parse()
self._swallowClassCompiler(self._popActiveClassCompiler())
self._compiled = True
def _spawnClassCompiler(self, className, klass=None,
mainMethodName='respond'):
if klass is None:
klass = self.classCompilerClass
classCompiler = klass(className,
mainMethodName=self._mainMethodName,
templateObj=self._templateObj,
fileName=self._filePath,
settingsManager=self,
)
return classCompiler
def _addActiveClassCompiler(self, classCompiler):
self._activeClassesList.append(classCompiler)
def _getActiveClassCompiler(self):
return self._activeClassesList[-1]
def _popActiveClassCompiler(self):
return self._activeClassesList.pop()
def _swallowClassCompiler(self, classCompiler):
classCompiler.cleanupState()
self._finishedClassesList.append( classCompiler )
self._finishedClassIndex[classCompiler.className()] = classCompiler
return classCompiler
def _finishedClasses(self):
return self._finishedClassesList
def importedVarNames(self):
return self._importedVarNames
def addImportedVarNames(self, varNames):
self._importedVarNames.extend(varNames)
def isErrorCatcherOn(self):
return self._errorCatcherOn
def turnErrorCatcherOn(self):
self._errorCatcherOn = True
def turnErrorCatcherOff(self):
self._errorCatcherOn = False
## methods for adding stuff to the module and class definitions
def setBaseClass(self, baseClassName):
# change the mainMethodName from the default 'respond'
self.setMainMethodName('writeBody') # @@TR: needs some thought
##################################################
## If the #extends directive contains a classname or modulename that isn't
# in self.importedVarNames() already, we assume that we need to add
# an implied 'from ModName import ClassName' where ModName == ClassName.
# - This is the case in WebKit servlet modules.
# - We also assume that the final . separates the classname from the
# module name. This might break if people do something really fancy
# with their dots and namespaces.
chunks = baseClassName.split('.')
if len(chunks) > 1:
modName, bareClassName = '.'.join(chunks[:-1]), chunks[-1]
else:
# baseClassName is either unimported modName
# or a previously imported classname
modName = bareClassName = baseClassName
if modName not in self.importedVarNames():
if len(chunks) > 1 and bareClassName != chunks[:-1][-1]:
modName = '.'.join(chunks)
importStatement = "from %s import %s" % (modName, bareClassName)
self.addImportStatement(importStatement)
self.addImportedVarNames( [bareClassName,] )
self._getActiveClassCompiler().setBaseClass(bareClassName)
##################################################
## dynamically bind to and __init__ with this new baseclass
# - this is required for dynamic use of templates compiled directly from file
# - also necessary for the 'monitorSrc' fileMtime triggered recompiles
if self._templateObj:
mod = self._templateObj._importAsDummyModule('\n'.join(self._importStatements))
class newClass:
pass
newClass.__name__ = self._mainClassName
__bases__ = (getattr(mod, self._baseClass), )
newClass.__bases__ = __bases__
self._templateObj.__class__ = newClass
# must initialize it so instance attributes are accessible
newClass.__init__(self._templateObj,
_globalSetVars=self._templateObj._globalSetVars,
_preBuiltSearchList=self._templateObj._searchList)
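# Illustration (not part of the original source; names hypothetical): an
# '#extends mypkg.MyTemplate' directive with 'mypkg' not yet imported
# produces
#   from mypkg.MyTemplate import MyTemplate
# because the module name is assumed to equal the class name (the WebKit
# servlet convention noted above); '#extends mypkg.MyTemplate.MyTemplate'
# produces the same import directly, since its final two components match.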
def setCompilerSetting(self, key, valueExpr):
self.setSetting(key, eval(valueExpr) )
self._parser.configureParser()
def setCompilerSettings(self, keywords, settingsStr):
KWs = keywords
merge = True
if 'nomerge' in KWs:
merge = False
if 'reset' in KWs:
# @@TR: this is actually caught by the parser at the moment.
# subject to change in the future
self._initializeSettings()
self._parser.configureParser()
return
elif 'python' in KWs:
settingsReader = self.updateSettingsFromPySrcStr
# this comes from SettingsManager
else:
# this comes from SettingsManager
settingsReader = self.updateSettingsFromConfigStr
settingsReader(settingsStr)
self._parser.configureParser()
def setShBang(self, shBang):
self._moduleShBang = shBang
def setModuleEncoding(self, encoding):
self._moduleEncoding = '# -*- coding: %s -*-' %encoding
def addModuleHeader(self, line):
self._moduleHeaderLines.append(line)
def addModuleDocString(self, line):
self._moduleDocStringLines.append(line)
def addSpecialVar(self, basename, contents):
self._specialVars['__' + basename + '__'] = contents.strip()
def addImportStatement(self, impStatement):
self._importStatements.append(impStatement)
#@@TR 2005-01-01: there's almost certainly a cleaner way to do this!
importVarNames = impStatement[impStatement.find('import') + len('import'):].split(',')
importVarNames = [var.split()[-1] for var in importVarNames] # handles aliases
importVarNames = [var for var in importVarNames if var!='*']
self.addImportedVarNames(importVarNames) #used by #extend for auto-imports
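# Illustration (not part of the original source; names hypothetical): for
# 'from foo import bar as b, baz' the slicing and splitting above yield
# importVarNames == ['b', 'baz'], i.e. the names actually bound rather
# than the source names.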
if self._templateObj:
import Template as TemplateMod
mod = self._templateObj._importAsDummyModule(impStatement)
# @@TR 2005-01-15: testing this approach to support
# 'from foo import *'
self._templateObj._searchList.append(mod)
# @@TR: old buggy approach is still needed for now
for varName in importVarNames:
if varName == '*': continue
val = getattr(mod, varName.split('.')[0])
setattr(TemplateMod, varName, val)
def addGlobalCodeChunk(self, codeChunk):
self._globalCodeChunks.append(codeChunk)
def addAttribute(self, attribName, expr):
self._getActiveClassCompiler().addAttribute(attribName + ' =' + expr)
if self._templateObj:
# @@TR: this code should be delegated to the compiler
val = eval(expr,{},{})
setattr(self._templateObj, attribName, val)
def addComment(self, comm):
if re.match(r'#+$',comm): # skip bar comments
return
specialVarMatch = specialVarRE.match(comm)
if specialVarMatch:
return self.addSpecialVar(specialVarMatch.group(1),
comm[specialVarMatch.end():])
elif comm.startswith('doc:'):
addLine = self.addMethDocString
comm = comm[len('doc:'):].strip()
elif comm.startswith('doc-method:'):
addLine = self.addMethDocString
comm = comm[len('doc-method:'):].strip()
elif comm.startswith('doc-module:'):
addLine = self.addModuleDocString
comm = comm[len('doc-module:'):].strip()
elif comm.startswith('doc-class:'):
addLine = self.addClassDocString
comm = comm[len('doc-class:'):].strip()
elif comm.startswith('header:'):
addLine = self.addModuleHeader
comm = comm[len('header:'):].strip()
else:
addLine = self.addMethComment
for line in comm.splitlines():
addLine(line)
## methods for module code wrapping
def moduleDef(self):
if not self._compiled:
self.compile()
if self._moduleDef:
return self._moduleDef
else:
return self.wrapModuleDef()
__str__ = moduleDef
def wrapModuleDef(self):
self.addModuleDocString('')
self.addModuleDocString(self.setting('defDocStrMsg'))
self.addModuleDocString(' CHEETAH VERSION: ' + Version)
self.addSpecialVar('CHEETAH_version', Version)
self.addModuleDocString(' Generation time: ' + self.timestamp())
self.addSpecialVar('CHEETAH_genTime', self.timestamp())
if self._filePath:
self.addSpecialVar('CHEETAH_src', self._filePath)
self.addModuleDocString(' Source file: ' + self._filePath)
self.addModuleDocString(' Source file last modified: ' +
self.timestamp(self._fileMtime))
moduleDef = """%(header)s
%(docstring)s
%(specialVars)s
##################################################
## DEPENDENCIES
%(imports)s
##################################################
## MODULE CONSTANTS
%(constants)s
##################################################
## CLASSES
%(classes)s
%(footer)s
""" % {'header':self.moduleHeader(),
'docstring':self.moduleDocstring(),
'specialVars':self.specialVars(),
'imports':self.importStatements(),
'constants':self.moduleConstants(),
'classes':self.classDefs(),
'footer':self.moduleFooter(),
}
self._moduleDef = moduleDef
return moduleDef
def timestamp(self, theTime=None):
if not theTime:
theTime = time.time()
return time.asctime(time.localtime(theTime))
def moduleHeader(self):
header = self._moduleShBang + '\n'
header += self._moduleEncoding + '\n'
if self._moduleHeaderLines:
offSet = self.setting('commentOffset')
header += (
'#' + ' '*offSet +
('\n#'+ ' '*offSet).join(self._moduleHeaderLines) +
'\n'
)
return header
def moduleDocstring(self):
docStr = ('"""' +
'\n'.join(self._moduleDocStringLines) +
'\n"""\n'
)
return docStr
def specialVars(self):
chunks = []
theVars = self._specialVars
keys = theVars.keys()
keys.sort()
for key in keys:
chunks.append(key + ' = ' + repr(theVars[key]) )
return '\n'.join(chunks)
def importStatements(self):
return '\n'.join(self._importStatements)
def moduleConstants(self):
return '\n'.join(self._moduleConstants)
def classDefs(self):
classDefs = [str(klass) for klass in self._finishedClasses() ]
return '\n\n'.join(classDefs)
def moduleFooter(self):
return """
# CHEETAH was developed by Tavis Rudd, Mike Orr, Ian Bicking and Chuck Esterbrook;
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org
##################################################
## if run from command line:
if __name__ == '__main__':
%(className)s().runAsMainProgram()
""" % {'className':self._mainClassName}
##################################################
## Make Compiler an alias for ModuleCompiler
Compiler = ModuleCompiler
|
jhjguxin/PyCDC
|
Karrigell-2.3.5/Cheetah/Compiler.py
|
Python
|
gpl-3.0
| 54,838
|
[
"VisIt"
] |
a86b1d1b6d9e7d6c72a7ccd5c32d59f2357af023420dd02795b6df77f12f43e6
|
__docformat__ = "restructuredtext en"
import mdp
from mdp import numx
from mdp.utils import mult, matmult, invert_exp_funcs2
from mdp.nodes import GrowingNeuralGasNode
def nmonomials(degree, nvariables):
"""Return the number of monomials of a given degree in a given number
of variables."""
return int(mdp.utils.comb(nvariables+degree-1, degree))
def expanded_dim(degree, nvariables):
"""Return the size of a vector of dimension ``nvariables`` after
a polynomial expansion of degree ``degree``."""
return int(mdp.utils.comb(nvariables+degree, degree))-1
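# Illustration (not part of the original source): in 3 variables there are
# nmonomials(2, 3) == comb(4, 2) == 6 monomials of degree 2
# (x**2, y**2, z**2, x*y, x*z, y*z), so a degree-2 expansion of a
# 3-dimensional vector has expanded_dim(2, 3) == comb(5, 2) - 1 == 9
# components (3 linear plus 6 quadratic).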
class _ExpansionNode(mdp.Node):
def __init__(self, input_dim = None, dtype = None):
super(_ExpansionNode, self).__init__(input_dim, None, dtype)
def expanded_dim(self, dim):
return dim
@staticmethod
def is_trainable():
return False
@staticmethod
def is_invertible():
return False
def _set_input_dim(self, n):
self._input_dim = n
self._output_dim = self.expanded_dim(n)
def _set_output_dim(self, n):
msg = "Output dim cannot be set explicitly!"
raise mdp.NodeException(msg)
class PolynomialExpansionNode(_ExpansionNode):
"""Perform expansion in a polynomial space."""
def __init__(self, degree, input_dim = None, dtype = None):
"""
Input arguments:
degree -- degree of the polynomial space where the input is expanded
"""
self._degree = int(degree)
super(PolynomialExpansionNode, self).__init__(input_dim, dtype)
def _get_supported_dtypes(self):
"""Return the list of dtypes supported by this node."""
return (mdp.utils.get_dtypes('AllFloat') +
mdp.utils.get_dtypes('AllInteger'))
def expanded_dim(self, dim):
"""Return the size of a vector of dimension 'dim' after
a polynomial expansion of degree 'self._degree'."""
return expanded_dim(self._degree, dim)
def _execute(self, x):
degree = self._degree
dim = self.input_dim
n = x.shape[1]
# preallocate memory
dexp = numx.zeros((self.output_dim, x.shape[0]), dtype=self.dtype)
# copy monomials of degree 1
dexp[0:n, :] = x.T
k = n
prec_end = 0
next_lens = numx.ones((dim+1, ))
next_lens[0] = 0
for i in range(2, degree+1):
prec_start = prec_end
prec_end += nmonomials(i-1, dim)
prec = dexp[prec_start:prec_end, :]
lens = next_lens[:-1].cumsum(axis=0)
next_lens = numx.zeros((dim+1, ))
for j in range(dim):
factor = prec[lens[j]:, :]
len_ = factor.shape[0]
dexp[k:k+len_, :] = x[:, j] * factor
next_lens[j+1] = len_
k = k+len_
return dexp.T
class QuadraticExpansionNode(PolynomialExpansionNode):
"""Perform expansion in the space formed by all linear and quadratic
monomials.
``QuadraticExpansionNode()`` is equivalent to a
``PolynomialExpansionNode(2)``"""
def __init__(self, input_dim = None, dtype = None):
super(QuadraticExpansionNode, self).__init__(2, input_dim = input_dim,
dtype = dtype)
class RBFExpansionNode(mdp.Node):
"""Expand input space with Gaussian Radial Basis Functions (RBFs).
The input data is filtered through a set of unnormalized Gaussian
filters, i.e.::
y_j = exp(-0.5/s_j * ||x - c_j||^2)
for isotropic RBFs, or more in general::
y_j = exp(-0.5 * (x-c_j)^T S^-1 (x-c_j))
for anisotropic RBFs.
"""
def __init__(self, centers, sizes, dtype = None):
"""
:Arguments:
centers
Centers of the RBFs. The dimensionality
of the centers determines the input dimensionality;
the number of centers determines the output
dimensionality.
sizes
Radius of the RBFs.
``sizes`` is a list with one element for each RBF, either
a scalar (the variance of the RBFs for isotropic RBFs)
or a covariance matrix (for anisotropic RBFs).
If ``sizes`` is not a list, the same variance/covariance
is used for all RBFs.
"""
super(RBFExpansionNode, self).__init__(None, None, dtype)
self._init_RBF(centers, sizes)
@staticmethod
def is_trainable():
return False
@staticmethod
def is_invertible():
return False
def _init_RBF(self, centers, sizes):
# initialize the centers of the RBFs
centers = numx.array(centers, self.dtype)
# define input/output dim
self.set_input_dim(centers.shape[1])
self.set_output_dim(centers.shape[0])
# multiply sizes if necessary
sizes = numx.array(sizes, self.dtype)
if sizes.ndim==0 or sizes.ndim==2:
sizes = numx.array([sizes]*self._output_dim)
else:
# check number of sizes correct
if sizes.shape[0] != self._output_dim:
msg = "There must be as many RBF sizes as centers"
raise mdp.NodeException, msg
if numx.isscalar(sizes[0]):
# isotropic RBFs
self._isotropic = True
else:
# anisotropic RBFs
self._isotropic = False
# check size
if (sizes.shape[1] != self._input_dim or
sizes.shape[2] != self._input_dim):
msg = ("Dimensionality of size matrices should be the same " +
"as input dimensionality (%d != %d)"
% (sizes.shape[1], self._input_dim))
raise mdp.NodeException, msg
# compute inverse covariance matrix
for i in range(sizes.shape[0]):
sizes[i,:,:] = mdp.utils.inv(sizes[i,:,:])
self._centers = centers
self._sizes = sizes
def _execute(self, x):
y = numx.zeros((x.shape[0], self._output_dim), dtype = self.dtype)
c, s = self._centers, self._sizes
for i in range(self._output_dim):
dist = x - c[i,:]
if self._isotropic:
tmp = (dist**2.).sum(axis=1) / s[i]
else:
tmp = (dist*matmult(dist, s[i,:,:])).sum(axis=1)
y[:,i] = numx.exp(-0.5*tmp)
return y
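# Illustrative usage sketch (not part of the original source): two isotropic
# RBFs in two dimensions sharing variance 1.0.
#   import numpy as np
#   node = RBFExpansionNode(centers=[[0., 0.], [1., 1.]], sizes=1.)
#   y = node.execute(np.array([[0., 0.]]))
#   # y has shape (1, 2); y[0, 0] == 1.0 because the input sits exactly on
#   # the first center, and y[0, 1] == exp(-1.0) for the second center.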
class GrowingNeuralGasExpansionNode(GrowingNeuralGasNode):
"""
Perform a trainable radial basis expansion, where the centers and
sizes of the basis functions are learned through a growing neural
gas.
positions of RBFs -- the positions of the nodes of the neural gas
sizes of the RBFs -- the mean distance to the neighbouring nodes.
Important: Adjust the maximum number of nodes to control the
dimension of the expansion.
More information on this expansion type can be found in:
B. Fritzke.
Growing cell structures-a self-organizing network for unsupervised
and supervised learning. Neural Networks 7, p. 1441--1460 (1994).
"""
def __init__(self, start_poss=None, eps_b=0.2, eps_n=0.006, max_age=50,
lambda_=100, alpha=0.5, d=0.995, max_nodes=100,
input_dim=None, dtype=None):
"""
For a full list of input arguments please check the documentation
of GrowingNeuralGasNode.
max_nodes (default 100) : maximum number of nodes in the neural gas,
and therefore an upper bound on the output dimension of the expansion.
"""
# __init__ is overwritten only to reset the default for
# max_nodes. The default of the GrowingNeuralGasNode is
# practically unlimited, possibly leading to very
# high-dimensional expansions.
super(GrowingNeuralGasExpansionNode, self).__init__(
start_poss=start_poss, eps_b=eps_b, eps_n=eps_n, max_age=max_age,
lambda_=lambda_, alpha=alpha, d=d, max_nodes=max_nodes,
input_dim=input_dim, dtype=dtype)
def _set_input_dim(self, n):
# Needs to be overwritten because GrowingNeuralGasNode would
# fix the output dim to n here.
self._input_dim = n
def _set_output_dim(self, n):
msg = "Output dim cannot be set explicitly!"
raise mdp.NodeException(msg)
@staticmethod
def is_trainable():
return True
@staticmethod
def is_invertible():
return False
def _stop_training(self):
super(GrowingNeuralGasExpansionNode, self)._stop_training()
# set the output dimension to the number of nodes of the neural gas
self._output_dim = self.get_nodes_position().shape[0]
# use the nodes of the learned neural gas as centers for a radial
# basis function expansion.
centers = self.get_nodes_position()
# use the mean distances to the neighbours as size of the RBF expansion
sizes = []
for i,node in enumerate(self.graph.nodes):
# calculate the size of the current RBF
pos = node.data.pos
sizes.append(numx.array([((pos-neighbor.data.pos)**2).sum()
for neighbor in node.neighbors() ]).mean())
# initialize the radial basis function expansion with centers and sizes
self.rbf_expansion = mdp.nodes.RBFExpansionNode(centers = centers,
sizes = sizes)
def _execute(self,x):
return self.rbf_expansion(x)
class GeneralExpansionNode(_ExpansionNode):
"""Expands the input signal x according to a list [f_0, ... f_k]
of functions.
Each function f_i should take the whole two-dimensional array x as input and
output another two-dimensional array. Moreover the output dimension should
depend only on the input dimension.
The output of the node is [f_0[x], ... f_k[x]], that is, the concatenation
of the outputs f_i[x].
Original code contributed by Alberto Escalante.
"""
def __init__(self, funcs, input_dim = None, dtype = None):
"""
Short argument description:
``funcs``
list of functions f_i that realize the expansion
"""
self.funcs = funcs
super(GeneralExpansionNode, self).__init__(input_dim, dtype)
def expanded_dim(self, n):
"""The expanded dim is computed by directly applying the expansion
functions f_i to a zero input of dimension n.
"""
return int(self.output_sizes(n).sum())
def output_sizes(self, n):
"""Return the individual output sizes of each expansion function
when the input has lenght n"""
sizes = numx.zeros(len(self.funcs))
x = numx.zeros((1,n))
for i, func in enumerate(self.funcs):
outx = func(x)
sizes[i] = outx.shape[1]
return sizes
@staticmethod
def is_trainable():
return False
@staticmethod
def is_invertible():
return False
def pseudo_inverse(self, x, use_hint=None):
"""Calculate a pseudo inverse of the expansion using
scipy.optimize.
``use_hint``
when calculating the pseudo inverse of the expansion,
the hint determines the starting point for the approximation.
This method requires scipy."""
try:
app_x_2, app_ex_x_2 = invert_exp_funcs2(x,
self.input_dim,
self.funcs,
use_hint=use_hint,
k=0.001)
return app_x_2.astype(self.dtype)
except NotImplementedError, exc:
raise mdp.MDPException(exc)
def _execute(self, x):
if self.input_dim is None:
self.set_input_dim(x.shape[1])
num_samples = x.shape[0]
sizes = self.output_sizes(self.input_dim)
out = numx.zeros((num_samples, self.output_dim), dtype=self.dtype)
current_pos = 0
for i, func in enumerate(self.funcs):
out[:,current_pos:current_pos+sizes[i]] = func(x)
current_pos += sizes[i]
return out
### old weave inline code to perform a quadratic expansion
# weave C code executed in the function QuadraticExpansionNode.execute
## _EXPANSION_POL2_CCODE = """
## // first of all, copy the linear part
## for( int i=0; i<columns; i++ ) {
## for( int l=0; l<rows; l++ ) {
## dexp(l,i) = x(l,i);
## }
## }
## // then, compute all monomials of second degree
## int k=columns;
## for( int i=0; i<columns; i++ ) {
## for( int j=i; j<columns; j++ ) {
## for( int l=0; l<rows; l++ ) {
## dexp(l,k) = x(l,i)*x(l,j);
## }
## k++;
## }
## }
## """
# it was called like this:
## def execute(self, x):
## mdp.Node.execute(self, x)
## rows = x.shape[0]
## columns = self.input_dim
## # dexp is going to contain the expanded signal
## dexp = numx.zeros((rows, self.output_dim), dtype=self._dtype)
## # execute the inline C code
## weave.inline(_EXPANSION_POL2_CCODE,['rows','columns','dexp','x'],
## type_factories = weave.blitz_tools.blitz_type_factories,
## compiler='gcc',extra_compile_args=['-O3']);
## return dexp
|
ME-ICA/me-ica
|
meica.libs/mdp/nodes/expansion_nodes.py
|
Python
|
lgpl-2.1
| 13,630
|
[
"Gaussian"
] |
3dad45dfbb9016c2948b06222b668beb36ca5985319574c4492b2d87443e9239
|
"""Scraper for the Supreme Court of Ohio
CourtID: ohio
Court Short Name: Ohio
Author: Andrei Chelaru
Reviewer: mlr
History:
- Stubbed out by Brian Carver
- 2014-07-30: Finished by Andrei Chelaru
"""
from juriscraper.OpinionSite import OpinionSite
import time
from datetime import date
class Site(OpinionSite):
def __init__(self):
super(Site, self).__init__()
# Changing the page # in the url will get additional pages
# Changing the source # (0-13) will get the 12 Courts of Appeals and
# the Court of Claims. We do not use the "all sources" link because a
# single day might yield more than 25 opinions and this scraper is
# not designed to walk through multiple pages.
self.court_index = 0
self.year = date.today().year
self.url = self.make_url(self.court_index, self.year)
self.court_id = self.__module__
self.back_scrape_iterable = range(1992, 2014)
self.base_path = "id('Table1')//tr[position() > 1]/td[2][normalize-space(.//text())]"
@staticmethod
def make_url(index, year):
return (
'http://www.sconet.state.oh.us/ROD/docs/default.asp?Page=1&Sort=docdecided%20DESC&PageSize=100&Source={court}&iaFilter={year}&ColumnMask=669'.format(
court=index,
year=year)
)
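# Illustration (not part of the original source): make_url(0, 2014) returns
# 'http://www.sconet.state.oh.us/ROD/docs/default.asp?Page=1&Sort=docdecided%20DESC&PageSize=100&Source=0&iaFilter=2014&ColumnMask=669'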
def _get_case_names(self):
path = "{base}/preceding::td[1]//text()".format(base=self.base_path)
return list(self.html.xpath(path))
def _get_download_urls(self):
path = "{base}/preceding::td[1]//a[1]/@href".format(base=self.base_path)
return list(self.html.xpath(path))
def _get_docket_numbers(self):
path = "{base}//text()".format(base=self.base_path)
return list(self.html.xpath(path))
def _get_summaries(self):
path = "{base}/following::td[1]//text()".format(base=self.base_path)
return list(self.html.xpath(path))
def _get_case_dates(self):
path = "{base}/following::td[3]//text()".format(base=self.base_path)
dates = []
for txt in self.html.xpath(path):
dates.append(date.fromtimestamp(time.mktime(time.strptime(
txt.strip(), '%m/%d/%Y'))))
return dates
def _get_neutral_citations(self):
path = "{base}/following::td[4]//text()".format(base=self.base_path)
return [s.replace('-', ' ') for s in self.html.xpath(path)]
def _get_precedential_statuses(self):
return ['Published'] * len(self.case_names)
def _get_judges(self):
path = "{base}/following::td[2]".format(base=self.base_path)
return map(self._return_judge, self.html.xpath(path))
@staticmethod
def _return_judge(e):
txt = e.xpath(".//text()")
if txt:
return txt[0]
else:
return ''
def _download_backwards(self, i):
self.url = 'http://www.sconet.state.oh.us/ROD/docs/default.asp?Page={i}&Sort=docdecided%20DESC&PageSize=100&Source={court}&iaFilter=-2&ColumnMask=669'.format(
i=i,
court=self.court_index,
)
self.html = self._download()
|
brianwc/juriscraper
|
opinions/united_states/state/ohio.py
|
Python
|
bsd-2-clause
| 3,141
|
[
"Brian"
] |
101ee48f9bd72a46ca0ee1a3c575bb4ef8797d56223889996407e2e099305dd3
|
"""Miscellaneous functions and classes that dont fit into specific
categories."""
import sys, os, vtk
#----------------------------------------------------------------------
# the following functions are for the vtk regression testing and examples
def vtkGetDataRoot():
"""vtkGetDataRoot() -- return vtk example data directory
"""
dataIndex = -1
for i in range(0, len(sys.argv)):
if sys.argv[i] == '-D' and i < len(sys.argv)-1:
dataIndex = i+1
if dataIndex != -1:
dataRoot = sys.argv[dataIndex]
else:
try:
dataRoot = os.environ['VTK_DATA_ROOT']
except KeyError:
dataRoot = '../../../../VTKData'
return dataRoot
def vtkGetTempDir():
"""vtkGetTempDir() -- return vtk testing temp dir
"""
tempIndex = -1
for i in range(0, len(sys.argv)):
if sys.argv[i] == '-T' and i < len(sys.argv)-1:
tempIndex = i+1
if tempIndex != -1:
tempDir = sys.argv[tempIndex]
else:
tempDir = '.'
return tempDir
def vtkRegressionTestImage( renWin ):
"""vtkRegressionTestImage(renWin) -- produce regression image for window
This function compares a vtkWindow against a baseline .png image named
by the '-V' command-line argument (relative to the VTK data root); if
the baseline file does not exist yet, it is first written out from the
current window contents. Returns 1 on a match (thresholded error <= 10),
0 on a mismatch, and 2 when no '-V' argument was given.
"""
imageIndex = -1
for i in range(0, len(sys.argv)):
if sys.argv[i] == '-V' and i < len(sys.argv)-1:
imageIndex = i+1
if imageIndex != -1:
fname = os.path.join(vtkGetDataRoot(), sys.argv[imageIndex])
rt_w2if = vtk.vtkWindowToImageFilter()
rt_w2if.SetInput(renWin)
if os.path.isfile(fname):
pass
else:
rt_pngw = vtk.vtkPNGWriter()
rt_pngw.SetFileName(fname)
rt_pngw.SetInputConnection(rt_w2if.GetOutputPort())
rt_pngw.Write()
rt_pngw = None
rt_png = vtk.vtkPNGReader()
rt_png.SetFileName(fname)
rt_id = vtk.vtkImageDifference()
rt_id.SetInputConnection(rt_w2if.GetOutputPort())
rt_id.SetImageConnection(rt_png.GetOutputPort())
rt_id.Update()
if rt_id.GetThresholdedError() <= 10:
return 1
else:
sys.stderr.write('Failed image test: %f\n'
% rt_id.GetThresholdedError())
return 0
return 2
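# Illustration (not part of the original source): a test script typically
# calls vtkRegressionTestImage(renWin) after rendering and is invoked as
#   python TestFoo.py -D /path/to/VTKData -V Baseline/TestFoo.png
# (paths hypothetical); the return value is 1 on a match, 0 on a mismatch,
# and 2 when no '-V' argument was supplied.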
|
collects/VTK
|
Wrapping/Python/vtk/util/misc.py
|
Python
|
bsd-3-clause
| 2,365
|
[
"VTK"
] |
de227d0d19432c513556be5730763b0bee36abd82728c46aaa4a25de20f1d91a
|
import MDAnalysis
import sys
# ----------------------------------------
# HOMEMADE ANALYSIS SELECTIONS
selections = []
selections.append(['aligned_CAs','protein and name CA and (resid 14:19 44:49 67:69 84:88 106:110 136:141 159:163 184:188 208:212 230:234 247:252 295:299)'])
selections.append(['aligned_betas','protein and (resid 14:19 44:49 67:69 84:88 106:110 136:141 159:163 184:188 208:212 230:234 247:252 295:299) and not name H*'])
selections.append(['full_protein','protein and not name H*'])
selections.append(['full_backbone','backbone'])
selections.append(['protein-10','protein and not resid 0:13 and not name H*'])
selections.append(['backbone-10','backbone and not resid 0:13'])
selections.append(['motif_1','protein and resid 19:30 and not name H*'])
selections.append(['motif_1a','protein and resid 49:53 and not name H*'])
selections.append(['a2_1','protein and resid 54:63 and not name H*'])
selections.append(['motif_1c','protein and resid 71:74 and not name H*'])
selections.append(['motif_1b','protein and resid 87:93 and not name H*'])
selections.append(['motif_2','protein and resid 111:118 and not name H*'])
selections.append(['motif_3','protein and resid 142:144 and not name H*'])
selections.append(['post_m_3','protein and resid 145:156 and not name H*'])
selections.append(['motif_4','protein and resid 190:196 and not name H*'])
selections.append(['motif_4a','protein and resid 213:218 221 and not name H*'])
selections.append(['motif_5','protein and resid 234:242 and not name H*'])
selections.append(['motif_6','protein and resid 281:290 and not name H*'])
# ----------------------------------------
# STANDARD RESIDUES:
nucleic = ['A5','A3','A','G5','G3','G','C5','C3','C','T5','T3','T','U5','U3','U']
triphosphate = ['atp','adp','PHX']
other = ['MG']
# ----------------------------------------
# HOMEMADE ATOM SELECTION STRINGS FOR THE STANDARD RESIDUES:
sugar = "name C5' C4' O4' C1' C3' C2' O2' " + " C5* C4* O4* C1* C3* O3* C2* O2* " # NO HYDROGENS; DOES NOT INCLUDE THE O5' atom (which I will include in the phosphate atom selection string...; the atoms with * are found in triphosphates;
sugar_5= sugar + " O5'" # NO HYDROGENS
sugar_3= sugar + " O3' " # NO HYDROGENS
base = 'name N9 C8 N7 C5 C6 N6 N1 C2 N3 C4 O6 N4 C2 O2 O4' # NO HYDROGENS; selection string that will select all appropriate atoms for any of the nucleic residues...
a_phos = 'name O5* O2A O1A PA O3A'
b_phos = 'name PB O1B O2B O3B'
g_phos = 'name PG O1G O2G O3G'
inorg_phos = 'name P O1 O2 O3 O4' # NO HYDROGENS
# ----------------------------------------
# FUNCTION USED TO MAKE ANY OF THE HOMEMADE ATOM SELECTIONS FOR THE STANDARD RESIDUES
def make_selections(analysis_universe,ref_universe,resname,resid,output_file,selection_list,nAtoms,ref_pos,count):
"""A function that takes in a residue name and creates a non-standard MDAnalysis atom selection
Usage: make_selection(........)
Arguments:
analysis_universe: MDAnalysis Universe object to be used as the analysis universe.
reference_universe: MDAnalysis Universe object to be used as the reference universe.
resname: string of the residue name;
resid: int of the residue ID number;
output_file: file object that the numbered selection strings are written to;
selection_list: list to which each new AtomGroup selection is appended;
nAtoms: list to which the atom count of each new selection is appended;
ref_pos: list to which the reference-universe positions of each selection are appended;
count: int used to number the selections written to output_file;
"""
# ----------------------------------------
# CREATING THE NUCLEIC SELECTIONS
if resname in nucleic:
# CREATING THE SELECTION FOR THE BASE OF NUCLEIC RESIDUES
sel_string = 'resname %s and resid %d and %s' %(resname,resid,base)
u_temp = analysis_universe.select_atoms(sel_string)
selection_list.append(u_temp)
nAtoms.append(u_temp.n_atoms)
ref_temp = ref_universe.select_atoms(sel_string)
ref_pos.append(ref_temp.positions)
if u_temp.n_atoms != ref_temp.n_atoms:
print 'Number of atoms do not match for selection %02d, %s, %s' %(count,resname,sel_string)
sys.exit()
output_file.write('%02d %s %s\n' %(count,resname,sel_string))
count +=1
# CREATING THE SELECTION FOR THE SUGAR OF NUCLEIC RESIDUES
if resname in ['A5','G5','C5','T5','U5']:
sel_string = 'resname %s and resid %d and %s' %(resname,resid,sugar_5)
u_temp = analysis_universe.select_atoms(sel_string)
selection_list.append(u_temp)
nAtoms.append(u_temp.n_atoms)
ref_temp = ref_universe.select_atoms(sel_string)
ref_pos.append(ref_temp.positions)
if u_temp.n_atoms != ref_temp.n_atoms:
print 'Number of atoms do not match for selection %02d, %s, %s' %(count,resname,sel_string)
sys.exit()
output_file.write('%02d %s %s\n' %(count,resname,sel_string))
count +=1
return
elif resname in ['A3','U3','C3','G3','T3']:
sel_string = 'resname %s and resid %d and %s' %(resname,resid,sugar_3)
u_temp = analysis_universe.select_atoms(sel_string)
selection_list.append(u_temp)
nAtoms.append(u_temp.n_atoms)
ref_temp = ref_universe.select_atoms(sel_string)
ref_pos.append(ref_temp.positions)
if u_temp.n_atoms != ref_temp.n_atoms:
print 'Number of atoms do not match for selection %02d, %s, %s' %(count,resname,sel_string)
sys.exit()
output_file.write('%02d %s %s\n' %(count,resname,sel_string))
count +=1
else:
sel_string = 'resname %s and resid %d and %s' %(resname,resid,sugar)
u_temp = analysis_universe.select_atoms(sel_string)
selection_list.append(u_temp)
nAtoms.append(u_temp.n_atoms)
ref_temp = ref_universe.select_atoms(sel_string)
ref_pos.append(ref_temp.positions)
if u_temp.n_atoms != ref_temp.n_atoms:
print 'Number of atoms do not match for selection %02d, %s, %s' %(count,resname,sel_string)
sys.exit()
output_file.write('%02d %s %s\n' %(count,resname,sel_string))
count +=1
# CREATING THE SELECTION FOR THE PHOSPHATE OF NUCLEIC RESIDUES
sel_string = "(resname %s and resid %s and name P OP1 OP2 O5') or (resid %s and name O3')" %(resname,resid,analysis_universe.residues[resid-1].resid)
u_temp = analysis_universe.select_atoms(sel_string)
selection_list.append(u_temp)
nAtoms.append(u_temp.n_atoms)
ref_temp = ref_universe.select_atoms(sel_string)
ref_pos.append(ref_temp.positions)
output_file.write('%02d %s %s\n' %(count,resname,sel_string))
count += 1
return
# ----------------------------------------
# CREATING THE TRIPHOSPHATE ATOM SELECTIONS
elif resname in triphosphate:
if resname in ['atp','adp']:
sel_string = 'resname %s and resid %d and %s' %(resname,resid,base)
u_temp = analysis_universe.select_atoms(sel_string)
selection_list.append(u_temp)
nAtoms.append(u_temp.n_atoms)
ref_temp = ref_universe.select_atoms(sel_string)
ref_pos.append(ref_temp.positions)
if u_temp.n_atoms != ref_temp.n_atoms:
print 'Number of atoms do not match for selection %02d, %s, %s' %(count,resname,sel_string)
sys.exit()
output_file.write('%02d %s %s\n' %(count,resname,sel_string))
count +=1
sel_string = 'resname %s and resid %d and %s' %(resname,resid,sugar)
u_temp = analysis_universe.select_atoms(sel_string)
selection_list.append(u_temp)
nAtoms.append(u_temp.n_atoms)
ref_temp = ref_universe.select_atoms(sel_string)
ref_pos.append(ref_temp.positions)
if u_temp.n_atoms != ref_temp.n_atoms:
print 'Number of atoms do not match for selection %02d, %s, %s' %(count,resname,sel_string)
sys.exit()
output_file.write('%02d %s %s\n' %(count,resname,sel_string))
count +=1
if resname == 'atp':
sel_string = 'resname %s and resid %d and %s' %(resname,resid,a_phos)
u_temp = analysis_universe.select_atoms(sel_string)
selection_list.append(u_temp)
nAtoms.append(u_temp.n_atoms)
ref_temp = ref_universe.select_atoms(sel_string)
ref_pos.append(ref_temp.positions)
output_file.write('%02d %s %s\n' %(count,resname,sel_string))
count +=1
sel_string = 'resname %s and resid %d and %s' %(resname,resid,b_phos)
u_temp = analysis_universe.select_atoms(sel_string)
selection_list.append(u_temp)
nAtoms.append(u_temp.n_atoms)
ref_temp = ref_universe.select_atoms(sel_string)
ref_pos.append(ref_temp.positions)
output_file.write('%02d %s %s\n' %(count,resname,sel_string))
count +=1
sel_string = 'resname %s and resid %d and %s' %(resname,resid,g_phos)
u_temp = analysis_universe.select_atoms(sel_string)
selection_list.append(u_temp)
nAtoms.append(u_temp.n_atoms)
ref_temp = ref_universe.select_atoms(sel_string)
ref_pos.append(ref_temp.positions)
output_file.write('%02d %s %s\n' %(count,resname,sel_string))
count +=1
return
elif resname == 'adp':
sel_string = 'resname %s and resid %d and %s' %(resname,resid,a_phos)
u_temp = analysis_universe.select_atoms(sel_string)
selection_list.append(u_temp)
nAtoms.append(u_temp.n_atoms)
ref_temp = ref_universe.select_atoms(sel_string)
ref_pos.append(ref_temp.positions)
output_file.write('%02d %s %s\n' %(count,resname,sel_string))
count +=1
sel_string = 'resname %s and resid %d and %s' %(resname,resid,b_phos)
u_temp = analysis_universe.select_atoms(sel_string)
selection_list.append(u_temp)
nAtoms.append(u_temp.n_atoms)
ref_temp = ref_universe.select_atoms(sel_string)
ref_pos.append(ref_temp.positions)
output_file.write('%02d %s %s\n' %(count,resname,sel_string))
count +=1
return
elif resname == 'PHX':
sel_string = 'resname %s and resid %d and %s' %(resname,resid,inorg_phos)
u_temp = analysis_universe.select_atoms(sel_string)
selection_list.append(u_temp)
nAtoms.append(u_temp.n_atoms)
ref_temp = ref_universe.select_atoms(sel_string)
ref_pos.append(ref_temp.positions)
output_file.write('%02d %s %s\n' %(count,resname,sel_string))
count +=1
return
# ----------------------------------------
# CREATING ANY REMAINING ATOM SELECTIONS...
elif resname in other:
sel_string = 'resname %s and resid %d' %(resname,resid)
u_temp = analysis_universe.select_atoms(sel_string)
selection_list.append(u_temp)
nAtoms.append(u_temp.n_atoms)
ref_temp = ref_universe.select_atoms(sel_string)
ref_pos.append(ref_temp.positions)
output_file.write('%02d %s %s\n' %(count,resname,sel_string))
count +=1
return
# ----------------------------------------
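# Illustrative usage sketch (not part of the original source; file names are
# hypothetical):
#   u = MDAnalysis.Universe('system.prmtop', 'traj.dcd')
#   ref = MDAnalysis.Universe('system.prmtop', 'reference.pdb')
#   selection_list, nAtoms, ref_pos = [], [], []
#   out = open('selections.txt', 'w')
#   for res in u.residues:
#       make_selections(u, ref, res.resname, res.resid, out,
#                       selection_list, nAtoms, ref_pos, 0)
# Note that the count incremented inside make_selections does not propagate
# back to the caller, so the numbering written to the output file restarts
# for every call unless the caller tracks it separately.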
|
rbdavid/RMSD_analyses
|
Basic_RMSD/make_selections.py
|
Python
|
gpl-3.0
| 10,176
|
[
"MDAnalysis"
] |
335376ae574aaef296ea8f294e713d6ac783f76dd36898167b97a131679364bf
|
#
# Parse tree nodes for expressions
#
import cython
cython.declare(error=object, warning=object, warn_once=object, InternalError=object,
CompileError=object, UtilityCode=object, TempitaUtilityCode=object,
StringEncoding=object, operator=object,
Naming=object, Nodes=object, PyrexTypes=object, py_object_type=object,
list_type=object, tuple_type=object, set_type=object, dict_type=object,
unicode_type=object, str_type=object, bytes_type=object, type_type=object,
Builtin=object, Symtab=object, Utils=object, find_coercion_error=object,
debug_disposal_code=object, debug_temp_alloc=object, debug_coercion=object,
bytearray_type=object, slice_type=object)
import sys
import copy
import operator
from Errors import error, warning, warn_once, InternalError, CompileError
from Errors import hold_errors, release_errors, held_errors, report_error
from Code import UtilityCode, TempitaUtilityCode
import StringEncoding
import Naming
import Nodes
from Nodes import Node
import PyrexTypes
from PyrexTypes import py_object_type, c_long_type, typecast, error_type, \
unspecified_type
import TypeSlots
from Builtin import list_type, tuple_type, set_type, dict_type, type_type, \
unicode_type, str_type, bytes_type, bytearray_type, basestring_type, slice_type
import Builtin
import Symtab
from Cython import Utils
from Annotate import AnnotationItem
from Cython.Compiler import Future
from Cython.Debugging import print_call_chain
from DebugFlags import debug_disposal_code, debug_temp_alloc, \
debug_coercion
try:
from __builtin__ import basestring
except ImportError:
basestring = str # Python 3
try:
from builtins import bytes
except ImportError:
bytes = str # Python 2
class NotConstant(object):
_obj = None
def __new__(cls):
if NotConstant._obj is None:
NotConstant._obj = super(NotConstant, cls).__new__(cls)
return NotConstant._obj
def __repr__(self):
return "<NOT CONSTANT>"
not_a_constant = NotConstant()
constant_value_not_set = object()
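# Illustration (not part of the original source): NotConstant overrides
# __new__ so that every instantiation returns the same object, hence
#   NotConstant() is not_a_constant   # -> True
# which lets compiler code compare candidate constant results using 'is'.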
# error messages when coercing from key[0] to key[1]
coercion_error_dict = {
# string related errors
(Builtin.unicode_type, Builtin.bytes_type) : "Cannot convert Unicode string to 'bytes' implicitly, encoding required.",
(Builtin.unicode_type, Builtin.str_type) : "Cannot convert Unicode string to 'str' implicitly. This is not portable and requires explicit encoding.",
(Builtin.unicode_type, PyrexTypes.c_char_ptr_type) : "Unicode objects only support coercion to Py_UNICODE*.",
(Builtin.unicode_type, PyrexTypes.c_uchar_ptr_type) : "Unicode objects only support coercion to Py_UNICODE*.",
(Builtin.bytes_type, Builtin.unicode_type) : "Cannot convert 'bytes' object to unicode implicitly, decoding required",
(Builtin.bytes_type, Builtin.str_type) : "Cannot convert 'bytes' object to str implicitly. This is not portable to Py3.",
(Builtin.bytes_type, Builtin.basestring_type) : "Cannot convert 'bytes' object to basestring implicitly. This is not portable to Py3.",
(Builtin.bytes_type, PyrexTypes.c_py_unicode_ptr_type) : "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'.",
(Builtin.basestring_type, Builtin.bytes_type) : "Cannot convert 'basestring' object to bytes implicitly. This is not portable.",
(Builtin.str_type, Builtin.unicode_type) : "str objects do not support coercion to unicode, use a unicode string literal instead (u'')",
(Builtin.str_type, Builtin.bytes_type) : "Cannot convert 'str' to 'bytes' implicitly. This is not portable.",
(Builtin.str_type, PyrexTypes.c_char_ptr_type) : "'str' objects do not support coercion to C types (use 'bytes'?).",
(Builtin.str_type, PyrexTypes.c_uchar_ptr_type) : "'str' objects do not support coercion to C types (use 'bytes'?).",
(Builtin.str_type, PyrexTypes.c_py_unicode_ptr_type) : "'str' objects do not support coercion to C types (use 'unicode'?).",
(PyrexTypes.c_char_ptr_type, Builtin.unicode_type) : "Cannot convert 'char*' to unicode implicitly, decoding required",
(PyrexTypes.c_uchar_ptr_type, Builtin.unicode_type) : "Cannot convert 'char*' to unicode implicitly, decoding required",
}
def find_coercion_error(type_tuple, default, env):
err = coercion_error_dict.get(type_tuple)
if err is None:
return default
elif ((PyrexTypes.c_char_ptr_type in type_tuple or PyrexTypes.c_uchar_ptr_type in type_tuple)
and env.directives['c_string_encoding']):
if type_tuple[1].is_pyobject:
return default
elif env.directives['c_string_encoding'] in ('ascii', 'default'):
return default
else:
return "'%s' objects do not support coercion to C types with non-ascii or non-default c_string_encoding" % type_tuple[0].name
else:
return err
def default_str_type(env):
return {
'bytes': bytes_type,
'bytearray': bytearray_type,
'str': str_type,
'unicode': unicode_type
}.get(env.directives['c_string_type'])
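# For example (illustrative): compiling with the directive
# `# cython: c_string_type=unicode` makes default_str_type() return
# unicode_type; any other directive value falls through dict.get() to None.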
def check_negative_indices(*nodes):
"""
Raise a warning on nodes that are known to have negative numeric values.
Used to find (potential) bugs inside of "wraparound=False" sections.
"""
for node in nodes:
if (node is None
or not isinstance(node.constant_result, (int, float, long))):
continue
if node.constant_result < 0:
warning(node.pos,
"the result of using negative indices inside of "
"code sections marked as 'wraparound=False' is "
"undefined", level=1)
def infer_sequence_item_type(env, seq_node, index_node=None, seq_type=None):
if not seq_node.is_sequence_constructor:
if seq_type is None:
seq_type = seq_node.infer_type(env)
if seq_type is tuple_type:
# tuples are immutable => we can safely follow assignments
if seq_node.cf_state and len(seq_node.cf_state) == 1:
try:
seq_node = seq_node.cf_state[0].rhs
except AttributeError:
pass
if seq_node is not None and seq_node.is_sequence_constructor:
if index_node is not None and index_node.has_constant_result():
try:
item = seq_node.args[index_node.constant_result]
except (ValueError, TypeError, IndexError):
pass
else:
return item.infer_type(env)
# if we're lucky, all items have the same type
item_types = set([item.infer_type(env) for item in seq_node.args])
if len(item_types) == 1:
return item_types.pop()
return None
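# Sketch of the inference above (illustrative): given `t = (1, 2.0)`, a
# constant index like `t[0]` infers the type of that single item, while a
# variable index `t[i]` only yields a type if all items agree, so the mixed
# int/float tuple infers None.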
class ExprNode(Node):
# subexprs [string] Class var holding names of subexpr node attrs
# type PyrexType Type of the result
# result_code string Code fragment
# result_ctype string C type of result_code if different from type
# is_temp boolean Result is in a temporary variable
# is_sequence_constructor
# boolean Is a list or tuple constructor expression
# is_starred boolean Is a starred expression (e.g. '*a')
# saved_subexpr_nodes
# [ExprNode or [ExprNode or None] or None]
# Cached result of subexpr_nodes()
# use_managed_ref boolean use ref-counted temps/assignments/etc.
# result_is_used boolean indicates that the result will be dropped and the
# result_code/temp_result can safely be set to None
result_ctype = None
type = None
temp_code = None
old_temp = None # error checker for multiple frees etc.
use_managed_ref = True # can be set by optimisation transforms
result_is_used = True
# The Analyse Expressions phase for expressions is split
# into two sub-phases:
#
# Analyse Types
# Determines the result type of the expression based
# on the types of its sub-expressions, and inserts
# coercion nodes into the expression tree where needed.
# Marks nodes which will need to have temporary variables
# allocated.
#
# Allocate Temps
# Allocates temporary variables where needed, and fills
# in the result_code field of each node.
#
# ExprNode provides some convenience routines which
# perform both of the above phases. These should only
# be called from statement nodes, and only when no
# coercion nodes need to be added around the expression
# being analysed. In that case, the above two phases
# should be invoked separately.
#
# Framework code in ExprNode provides much of the common
# processing for the various phases. It makes use of the
# 'subexprs' class attribute of ExprNodes, which should
# contain a list of the names of attributes which can
# hold sub-nodes or sequences of sub-nodes.
#
# The framework makes use of a number of abstract methods.
# Their responsibilities are as follows.
#
# Declaration Analysis phase
#
# analyse_target_declaration
# Called during the Analyse Declarations phase to analyse
# the LHS of an assignment or argument of a del statement.
# Nodes which cannot be the LHS of an assignment need not
# implement it.
#
# Expression Analysis phase
#
# analyse_types
# - Call analyse_types on all sub-expressions.
# - Check operand types, and wrap coercion nodes around
# sub-expressions where needed.
# - Set the type of this node.
# - If a temporary variable will be required for the
# result, set the is_temp flag of this node.
#
# analyse_target_types
# Called during the Analyse Types phase to analyse
# the LHS of an assignment or argument of a del
# statement. Similar responsibilities to analyse_types.
#
# target_code
# Called by the default implementation of allocate_target_temps.
# Should return a C lvalue for assigning to the node. The default
# implementation calls calculate_result_code.
#
# check_const
# - Check that this node and its subnodes form a
# legal constant expression. If so, do nothing,
# otherwise call not_const.
#
# The default implementation of check_const
# assumes that the expression is not constant.
#
# check_const_addr
# - Same as check_const, except check that the
# expression is a C lvalue whose address is
# constant. Otherwise, call addr_not_const.
#
# The default implementation of check_const_addr
# assumes that the expression is not a constant
# lvalue.
#
# Code Generation phase
#
# generate_evaluation_code
# - Call generate_evaluation_code for sub-expressions.
# - Perform the functions of generate_result_code
# (see below).
# - If result is temporary, call generate_disposal_code
# on all sub-expressions.
#
# A default implementation of generate_evaluation_code
# is provided which uses the following abstract methods:
#
# generate_result_code
# - Generate any C statements necessary to calculate
# the result of this node from the results of its
# sub-expressions.
#
# calculate_result_code
# - Should return a C code fragment evaluating to the
# result. This is only called when the result is not
# a temporary.
#
# generate_assignment_code
# Called on the LHS of an assignment.
# - Call generate_evaluation_code for sub-expressions.
# - Generate code to perform the assignment.
# - If the assignment absorbed a reference, call
# generate_post_assignment_code on the RHS,
# otherwise call generate_disposal_code on it.
#
# generate_deletion_code
# Called on an argument of a del statement.
# - Call generate_evaluation_code for sub-expressions.
# - Generate code to perform the deletion.
# - Call generate_disposal_code on all sub-expressions.
#
#
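# A typical driving sequence (illustrative sketch, assuming a statement
# node orchestrates the phases described above):
#
#     node = node.analyse_expressions(env)   # types + coercion nodes
#     node.generate_evaluation_code(code)    # temps allocated, C emitted
#     ... use node.result() in the statement's own C code ...
#     node.generate_disposal_code(code)      # dispose of the result
#     node.free_temps(code)                  # release temp slots
#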
is_sequence_constructor = 0
is_string_literal = 0
is_attribute = 0
is_subscript = 0
saved_subexpr_nodes = None
is_temp = 0
is_target = 0
is_starred = 0
constant_result = constant_value_not_set
# whether this node with a memoryview type should be broadcast
memslice_broadcast = False
child_attrs = property(fget=operator.attrgetter('subexprs'))
def not_implemented(self, method_name):
print_call_chain(method_name, "not implemented") ###
raise InternalError(
"%s.%s not implemented" %
(self.__class__.__name__, method_name))
def is_lvalue(self):
return 0
def is_addressable(self):
return self.is_lvalue() and not self.type.is_memoryviewslice
def is_ephemeral(self):
# An ephemeral node is one whose result is in
# a Python temporary and we suspect there are no
# other references to it. Certain operations are
# disallowed on such values, since they are
# likely to result in a dangling pointer.
return self.type.is_pyobject and self.is_temp
def subexpr_nodes(self):
# Extract a list of subexpression nodes based
# on the contents of the subexprs class attribute.
nodes = []
for name in self.subexprs:
item = getattr(self, name)
if item is not None:
if type(item) is list:
nodes.extend(item)
else:
nodes.append(item)
return nodes
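# For example (illustrative): a binary operator node declaring
#     subexprs = ['operand1', 'operand2']
# gets [self.operand1, self.operand2] back from subexpr_nodes(), with
# list-valued attributes flattened into the result.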
def result(self):
if self.is_temp:
return self.temp_code
else:
return self.calculate_result_code()
def result_as(self, type = None):
# Return the result code cast to the specified C type.
if (self.is_temp and self.type.is_pyobject and
type != py_object_type):
# Allocated temporaries are always PyObject *, which may not
# reflect the actual type (e.g. an extension type)
return typecast(type, py_object_type, self.result())
return typecast(type, self.ctype(), self.result())
def py_result(self):
# Return the result code cast to PyObject *.
return self.result_as(py_object_type)
def ctype(self):
# Return the native C type of the result (i.e. the
# C type of the result_code expression).
return self.result_ctype or self.type
def get_constant_c_result_code(self):
# Return the constant value of this node as a result code
# string, or None if the node is not constant. This method
# can be called when the constant result code is required
# before the code generation phase.
#
# The return value is a string that can represent a simple C
# value, a constant C name or a constant C expression. If the
# node type depends on Python code, this must return None.
return None
def calculate_constant_result(self):
# Calculate the constant compile time result value of this
# expression and store it in ``self.constant_result``. Does
# nothing by default, thus leaving ``self.constant_result``
# unknown. If valid, the result can be an arbitrary Python
# value.
#
# This must only be called when it is assured that all
# sub-expressions have a valid constant_result value. The
# ConstantFolding transform will do this.
pass
def has_constant_result(self):
return self.constant_result is not constant_value_not_set and \
self.constant_result is not not_a_constant
def compile_time_value(self, denv):
# Return value of compile-time expression, or report error.
error(self.pos, "Invalid compile-time expression")
def compile_time_value_error(self, e):
error(self.pos, "Error in compile-time expression: %s: %s" % (
e.__class__.__name__, e))
# ------------- Declaration Analysis ----------------
def analyse_target_declaration(self, env):
error(self.pos, "Cannot assign to or delete this")
# ------------- Expression Analysis ----------------
def analyse_const_expression(self, env):
# Called during the analyse_declarations phase of a
# constant expression. Analyses the expression's type,
# checks whether it is a legal const expression,
# and determines its value.
node = self.analyse_types(env)
node.check_const()
return node
def analyse_expressions(self, env):
# Convenience routine performing both the Type
# Analysis and Temp Allocation phases for a whole
# expression.
return self.analyse_types(env)
def analyse_target_expression(self, env, rhs):
# Convenience routine performing both the Type
# Analysis and Temp Allocation phases for the LHS of
# an assignment.
return self.analyse_target_types(env)
def analyse_boolean_expression(self, env):
# Analyse expression and coerce to a boolean.
node = self.analyse_types(env)
bool = node.coerce_to_boolean(env)
return bool
def analyse_temp_boolean_expression(self, env):
# Analyse boolean expression and coerce result into
# a temporary. This is used when a branch is to be
# performed on the result and we won't have an
# opportunity to ensure disposal code is executed
# afterwards. By forcing the result into a temporary,
# we ensure that all disposal has been done by the
# time we get the result.
node = self.analyse_types(env)
return node.coerce_to_boolean(env).coerce_to_simple(env)
# --------------- Type Inference -----------------
def type_dependencies(self, env):
# Returns the list of entries whose types must be determined
# before the type of self can be inferred.
if hasattr(self, 'type') and self.type is not None:
return ()
return sum([node.type_dependencies(env) for node in self.subexpr_nodes()], ())
def infer_type(self, env):
# Attempt to deduce the type of self.
# Differs from analyse_types as it avoids unnecessary
# analysis of subexpressions, but can assume everything
# in self.type_dependencies() has been resolved.
if hasattr(self, 'type') and self.type is not None:
return self.type
elif hasattr(self, 'entry') and self.entry is not None:
return self.entry.type
else:
self.not_implemented("infer_type")
def nonlocally_immutable(self):
# Returns whether this variable is a safe reference, i.e.
# can't be modified as part of globals or closures.
return self.is_literal or self.is_temp or self.type.is_array or self.type.is_cfunction
# --------------- Type Analysis ------------------
def analyse_as_module(self, env):
# If this node can be interpreted as a reference to a
# cimported module, return its scope, else None.
return None
def analyse_as_type(self, env):
# If this node can be interpreted as a reference to a
# type, return that type, else None.
return None
def analyse_as_extension_type(self, env):
# If this node can be interpreted as a reference to an
# extension type or builtin type, return its type, else None.
return None
def analyse_types(self, env):
self.not_implemented("analyse_types")
def analyse_target_types(self, env):
return self.analyse_types(env)
def nogil_check(self, env):
# By default, any expression based on Python objects is
# prevented in nogil environments. Subtypes must override
# this if they can work without the GIL.
if self.type and self.type.is_pyobject:
self.gil_error()
def gil_assignment_check(self, env):
if env.nogil and self.type.is_pyobject:
error(self.pos, "Assignment of Python object not allowed without gil")
def check_const(self):
self.not_const()
return False
def not_const(self):
error(self.pos, "Not allowed in a constant expression")
def check_const_addr(self):
self.addr_not_const()
return False
def addr_not_const(self):
error(self.pos, "Address is not constant")
# ----------------- Result Allocation -----------------
def result_in_temp(self):
# Return true if result is in a temporary owned by
# this node or one of its subexpressions. Overridden
# by certain nodes which can share the result of
# a subnode.
return self.is_temp
def target_code(self):
# Return code fragment for use as LHS of a C assignment.
return self.calculate_result_code()
def calculate_result_code(self):
self.not_implemented("calculate_result_code")
# def release_target_temp(self, env):
# # Release temporaries used by LHS of an assignment.
# self.release_subexpr_temps(env)
def allocate_temp_result(self, code):
if self.temp_code:
raise RuntimeError("Temp allocated multiple times in %r: %r" % (self.__class__.__name__, self.pos))
type = self.type
if not type.is_void:
if type.is_pyobject:
type = PyrexTypes.py_object_type
self.temp_code = code.funcstate.allocate_temp(
type, manage_ref=self.use_managed_ref)
else:
self.temp_code = None
def release_temp_result(self, code):
if not self.temp_code:
if not self.result_is_used:
# not used anyway, so ignore if not set up
return
if self.old_temp:
raise RuntimeError("temp %s released multiple times in %s" % (
self.old_temp, self.__class__.__name__))
else:
raise RuntimeError("no temp, but release requested in %s" % (
self.__class__.__name__))
code.funcstate.release_temp(self.temp_code)
self.old_temp = self.temp_code
self.temp_code = None
# ---------------- Code Generation -----------------
def make_owned_reference(self, code):
"""
If result is a pyobject, make sure we own a reference to it.
If the result is in a temp, it is already a new reference.
"""
if self.type.is_pyobject and not self.result_in_temp():
code.put_incref(self.result(), self.ctype())
def make_owned_memoryviewslice(self, code):
"""
Make sure we own the reference to this memoryview slice.
"""
if not self.result_in_temp():
code.put_incref_memoryviewslice(self.result(),
have_gil=self.in_nogil_context)
def generate_evaluation_code(self, code):
# Generate code to evaluate this node and
# its sub-expressions, and dispose of any
# temporary results of its sub-expressions.
self.generate_subexpr_evaluation_code(code)
code.mark_pos(self.pos)
if self.is_temp:
self.allocate_temp_result(code)
self.generate_result_code(code)
if self.is_temp:
# If we are a temp, we do not need to wait until this node is
# disposed of before disposing of children.
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
def generate_subexpr_evaluation_code(self, code):
for node in self.subexpr_nodes():
node.generate_evaluation_code(code)
def generate_result_code(self, code):
self.not_implemented("generate_result_code")
def generate_disposal_code(self, code):
if self.is_temp:
if self.result():
if self.type.is_pyobject:
code.put_decref_clear(self.result(), self.ctype())
elif self.type.is_memoryviewslice:
code.put_xdecref_memoryviewslice(
self.result(), have_gil=not self.in_nogil_context)
else:
# Already done if self.is_temp
self.generate_subexpr_disposal_code(code)
def generate_subexpr_disposal_code(self, code):
# Generate code to dispose of temporary results
# of all sub-expressions.
for node in self.subexpr_nodes():
node.generate_disposal_code(code)
def generate_post_assignment_code(self, code):
if self.is_temp:
if self.type.is_pyobject:
code.putln("%s = 0;" % self.result())
elif self.type.is_memoryviewslice:
code.putln("%s.memview = NULL;" % self.result())
code.putln("%s.data = NULL;" % self.result())
else:
self.generate_subexpr_disposal_code(code)
def generate_assignment_code(self, rhs, code):
# Stub method for nodes which are not legal as
# the LHS of an assignment. An error will have
# been reported earlier.
pass
def generate_deletion_code(self, code, ignore_nonexisting=False):
# Stub method for nodes that are not legal as
# the argument of a del statement. An error
# will have been reported earlier.
pass
def free_temps(self, code):
if self.is_temp:
if not self.type.is_void:
self.release_temp_result(code)
else:
self.free_subexpr_temps(code)
def free_subexpr_temps(self, code):
for sub in self.subexpr_nodes():
sub.free_temps(code)
def generate_function_definitions(self, env, code):
pass
# ---------------- Annotation ---------------------
def annotate(self, code):
for node in self.subexpr_nodes():
node.annotate(code)
# ----------------- Coercion ----------------------
def coerce_to(self, dst_type, env):
# Coerce the result so that it can be assigned to
# something of type dst_type. If processing is necessary,
# wraps this node in a coercion node and returns that.
# Otherwise, returns this node unchanged.
#
# This method is called during the analyse_expressions
# phase of the src_node's processing.
#
# Note that subclasses that override this (especially
# ConstNodes) must not (re-)set their own .type attribute
# here. Since expression nodes may turn up in different
# places in the tree (e.g. inside of CloneNodes in cascaded
# assignments), this method must return a new node instance
# if it changes the type.
#
src = self
src_type = self.type
if self.check_for_coercion_error(dst_type, env):
return self
if dst_type.is_reference and not src_type.is_reference:
dst_type = dst_type.ref_base_type
if src_type.is_const:
src_type = src_type.const_base_type
if src_type.is_fused or dst_type.is_fused:
# See if we are coercing a fused function to a pointer to a
# specialized function
if (src_type.is_cfunction and not dst_type.is_fused and
dst_type.is_ptr and dst_type.base_type.is_cfunction):
dst_type = dst_type.base_type
for signature in src_type.get_all_specialized_function_types():
if signature.same_as(dst_type):
src.type = signature
src.entry = src.type.entry
src.entry.used = True
return self
if src_type.is_fused:
error(self.pos, "Type is not specialized")
else:
error(self.pos, "Cannot coerce to a type that is not specialized")
self.type = error_type
return self
if self.coercion_type is not None:
# This is purely for error checking purposes!
node = NameNode(self.pos, name='', type=self.coercion_type)
node.coerce_to(dst_type, env)
if dst_type.is_memoryviewslice:
import MemoryView
if not src.type.is_memoryviewslice:
if src.type.is_pyobject:
src = CoerceToMemViewSliceNode(src, dst_type, env)
elif src.type.is_array:
src = CythonArrayNode.from_carray(src, env).coerce_to(
dst_type, env)
elif not src_type.is_error:
error(self.pos,
"Cannot convert '%s' to memoryviewslice" %
(src_type,))
elif not MemoryView.src_conforms_to_dst(
src.type, dst_type, broadcast=self.memslice_broadcast):
if src.type.dtype.same_as(dst_type.dtype):
msg = "Memoryview '%s' not conformable to memoryview '%s'."
tup = src.type, dst_type
else:
msg = "Different base types for memoryviews (%s, %s)"
tup = src.type.dtype, dst_type.dtype
error(self.pos, msg % tup)
elif dst_type.is_pyobject:
if not src.type.is_pyobject:
if dst_type is bytes_type and src.type.is_int:
src = CoerceIntToBytesNode(src, env)
else:
src = CoerceToPyTypeNode(src, env, type=dst_type)
if not src.type.subtype_of(dst_type):
if src.constant_result is not None:
src = PyTypeTestNode(src, dst_type, env)
elif src.type.is_pyobject:
src = CoerceFromPyTypeNode(dst_type, src, env)
elif (dst_type.is_complex
and src_type != dst_type
and dst_type.assignable_from(src_type)):
src = CoerceToComplexNode(src, dst_type, env)
else: # neither src nor dst are py types
# Also compare the type names as strings, since for C types that
# is enough, but Cython gets confused when the types are
# declared in different pxi files.
if not (str(src.type) == str(dst_type) or dst_type.assignable_from(src_type)):
self.fail_assignment(dst_type)
return src
def fail_assignment(self, dst_type):
error(self.pos, "Cannot assign type '%s' to '%s'" % (self.type, dst_type))
def check_for_coercion_error(self, dst_type, env, fail=False, default=None):
if fail and not default:
default = "Cannot assign type '%(FROM)s' to '%(TO)s'"
message = find_coercion_error((self.type, dst_type), default, env)
if message is not None:
error(self.pos, message % {'FROM': self.type, 'TO': dst_type})
return True
if fail:
self.fail_assignment(dst_type)
return True
return False
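# A typical failure path (illustrative): coercing a 'bytes' node to
# unicode_type looks up (bytes_type, unicode_type) in coercion_error_dict
# via find_coercion_error() and reports "Cannot convert 'bytes' object to
# unicode implicitly, decoding required" at this node's position.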
def coerce_to_pyobject(self, env):
return self.coerce_to(PyrexTypes.py_object_type, env)
def coerce_to_boolean(self, env):
# Coerce result to something acceptable as
# a boolean value.
# if it's constant, calculate the result now
if self.has_constant_result():
bool_value = bool(self.constant_result)
return BoolNode(self.pos, value=bool_value,
constant_result=bool_value)
type = self.type
if type.is_enum or type.is_error:
return self
elif type.is_pyobject or type.is_int or type.is_ptr or type.is_float:
return CoerceToBooleanNode(self, env)
else:
error(self.pos, "Type '%s' not acceptable as a boolean" % type)
return self
def coerce_to_integer(self, env):
# If not already some C integer type, coerce to longint.
if self.type.is_int:
return self
else:
return self.coerce_to(PyrexTypes.c_long_type, env)
def coerce_to_temp(self, env):
# Ensure that the result is in a temporary.
if self.result_in_temp():
return self
else:
return CoerceToTempNode(self, env)
def coerce_to_simple(self, env):
# Ensure that the result is simple (see is_simple).
if self.is_simple():
return self
else:
return self.coerce_to_temp(env)
def is_simple(self):
# A node is simple if its result is something that can
# be referred to without performing any operations, e.g.
# a constant, local var, C global var, struct member
# reference, or temporary.
return self.result_in_temp()
def may_be_none(self):
if self.type and not (self.type.is_pyobject or
self.type.is_memoryviewslice):
return False
if self.has_constant_result():
return self.constant_result is not None
return True
def as_cython_attribute(self):
return None
def as_none_safe_node(self, message, error="PyExc_TypeError", format_args=()):
# Wraps the node in a NoneCheckNode if it is not known to be
# not-None (e.g. because it is a Python literal).
if self.may_be_none():
return NoneCheckNode(self, error, message, format_args)
else:
return self
@classmethod
def from_node(cls, node, **kwargs):
"""Instantiate this node class from another node, properly
copying over all attributes that one would forget otherwise.
"""
attributes = "cf_state cf_maybe_null cf_is_null constant_result".split()
for attr_name in attributes:
if attr_name in kwargs:
continue
try:
value = getattr(node, attr_name)
except AttributeError:
pass
else:
kwargs[attr_name] = value
return cls(node.pos, **kwargs)
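# Usage sketch (illustrative, with a hypothetical replacement): a transform
# that folds a condition to a constant might write
#     new_node = BoolNode.from_node(old_node, value=True,
#                                   constant_result=True)
# keeping the old node's control-flow attributes intact unless they are
# explicitly overridden in kwargs.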
class AtomicExprNode(ExprNode):
# Abstract base class for expression nodes which have
# no sub-expressions.
subexprs = []
# Override to optimize -- we know we have no children
def generate_subexpr_evaluation_code(self, code):
pass
def generate_subexpr_disposal_code(self, code):
pass
class PyConstNode(AtomicExprNode):
# Abstract base class for constant Python values.
is_literal = 1
type = py_object_type
def is_simple(self):
return 1
def may_be_none(self):
return False
def analyse_types(self, env):
return self
def calculate_result_code(self):
return self.value
def generate_result_code(self, code):
pass
class NoneNode(PyConstNode):
# The constant value None
is_none = 1
value = "Py_None"
constant_result = None
nogil_check = None
def compile_time_value(self, denv):
return None
def may_be_none(self):
return True
class EllipsisNode(PyConstNode):
# '...' in a subscript list.
value = "Py_Ellipsis"
constant_result = Ellipsis
def compile_time_value(self, denv):
return Ellipsis
class ConstNode(AtomicExprNode):
# Abstract base type for literal constant nodes.
#
# value string C code fragment
is_literal = 1
nogil_check = None
def is_simple(self):
return 1
def nonlocally_immutable(self):
return 1
def may_be_none(self):
return False
def analyse_types(self, env):
return self # Types are held in class variables
def check_const(self):
return True
def get_constant_c_result_code(self):
return self.calculate_result_code()
def calculate_result_code(self):
return str(self.value)
def generate_result_code(self, code):
pass
class BoolNode(ConstNode):
type = PyrexTypes.c_bint_type
# The constant value True or False
def calculate_constant_result(self):
self.constant_result = self.value
def compile_time_value(self, denv):
return self.value
def calculate_result_code(self):
if self.type.is_pyobject:
return self.value and 'Py_True' or 'Py_False'
else:
return str(int(self.value))
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject and self.type.is_int:
return BoolNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=Builtin.bool_type)
if dst_type.is_int and self.type.is_pyobject:
return BoolNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=PyrexTypes.c_bint_type)
return ConstNode.coerce_to(self, dst_type, env)
class NullNode(ConstNode):
type = PyrexTypes.c_null_ptr_type
value = "NULL"
constant_result = 0
def get_constant_c_result_code(self):
return self.value
class CharNode(ConstNode):
type = PyrexTypes.c_char_type
def calculate_constant_result(self):
self.constant_result = ord(self.value)
def compile_time_value(self, denv):
return ord(self.value)
def calculate_result_code(self):
return "'%s'" % StringEncoding.escape_char(self.value)
class IntNode(ConstNode):
# unsigned "" or "U"
# longness "" or "L" or "LL"
# is_c_literal True/False/None creator considers this a C integer literal
unsigned = ""
longness = ""
is_c_literal = None # unknown
def __init__(self, pos, **kwds):
ExprNode.__init__(self, pos, **kwds)
if 'type' not in kwds:
self.type = self.find_suitable_type_for_value()
def find_suitable_type_for_value(self):
if self.constant_result is constant_value_not_set:
try:
self.calculate_constant_result()
except ValueError:
pass
# we ignore 'is_c_literal = True' and instead map signed 32bit
# integers as C long values
if self.is_c_literal or \
self.constant_result in (constant_value_not_set, not_a_constant) or \
self.unsigned or self.longness == 'LL':
# clearly a C literal
rank = (self.longness == 'LL') and 2 or 1
suitable_type = PyrexTypes.modifiers_and_name_to_type[not self.unsigned, rank, "int"]
if self.type:
suitable_type = PyrexTypes.widest_numeric_type(suitable_type, self.type)
else:
# C literal or Python literal - split at 32bit boundary
if -2**31 <= self.constant_result < 2**31:
if self.type and self.type.is_int:
suitable_type = self.type
else:
suitable_type = PyrexTypes.c_long_type
else:
suitable_type = PyrexTypes.py_object_type
return suitable_type
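# Sketch of the rules above (illustrative): '100' fits in 32 bits and
# becomes a C long; '100U' and '100LL' are clearly C literals (unsigned int
# and signed long long); a literal outside the signed 32-bit range with no
# suffix falls back to a Python object.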
def coerce_to(self, dst_type, env):
if self.type is dst_type:
return self
elif dst_type.is_float:
if self.has_constant_result():
return FloatNode(self.pos, value='%d.0' % int(self.constant_result), type=dst_type,
constant_result=float(self.constant_result))
else:
return FloatNode(self.pos, value=self.value, type=dst_type,
constant_result=not_a_constant)
if dst_type.is_numeric and not dst_type.is_complex:
node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
type = dst_type, is_c_literal = True,
unsigned=self.unsigned, longness=self.longness)
return node
elif dst_type.is_pyobject:
node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
type = PyrexTypes.py_object_type, is_c_literal = False,
unsigned=self.unsigned, longness=self.longness)
else:
# FIXME: not setting the type here to keep it working with
# complex numbers. Should they be special cased?
node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
unsigned=self.unsigned, longness=self.longness)
# We still need to perform normal coerce_to processing on the
# result, because we might be coercing to an extension type,
# in which case a type test node will be needed.
return ConstNode.coerce_to(node, dst_type, env)
def coerce_to_boolean(self, env):
return IntNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=PyrexTypes.c_bint_type,
unsigned=self.unsigned, longness=self.longness)
def generate_evaluation_code(self, code):
if self.type.is_pyobject:
# pre-allocate a Python version of the number
plain_integer_string = str(Utils.str_to_number(self.value))
self.result_code = code.get_py_int(plain_integer_string, self.longness)
else:
self.result_code = self.get_constant_c_result_code()
def get_constant_c_result_code(self):
return self.value_as_c_integer_string() + self.unsigned + self.longness
def value_as_c_integer_string(self):
value = self.value
if len(value) > 2:
# convert C-incompatible Py3 oct/bin notations
if value[1] in 'oO':
value = value[0] + value[2:] # '0o123' => '0123'
elif value[1] in 'bB':
value = int(value[2:], 2)
return str(value)
def calculate_result_code(self):
return self.result_code
def calculate_constant_result(self):
self.constant_result = Utils.str_to_number(self.value)
def compile_time_value(self, denv):
return Utils.str_to_number(self.value)
class FloatNode(ConstNode):
type = PyrexTypes.c_double_type
def calculate_constant_result(self):
self.constant_result = float(self.value)
def compile_time_value(self, denv):
return float(self.value)
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject and self.type.is_float:
return FloatNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=Builtin.float_type)
if dst_type.is_float and self.type.is_pyobject:
return FloatNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=dst_type)
return ConstNode.coerce_to(self, dst_type, env)
def calculate_result_code(self):
return self.result_code
def get_constant_c_result_code(self):
strval = self.value
assert isinstance(strval, (str, unicode))
cmpval = repr(float(strval))
if cmpval == 'nan':
return "(Py_HUGE_VAL * 0)"
elif cmpval == 'inf':
return "Py_HUGE_VAL"
elif cmpval == '-inf':
return "(-Py_HUGE_VAL)"
else:
return strval
def generate_evaluation_code(self, code):
c_value = self.get_constant_c_result_code()
if self.type.is_pyobject:
self.result_code = code.get_py_float(self.value, c_value)
else:
self.result_code = c_value
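# Note (descriptive): the nan/inf special cases in
# get_constant_c_result_code() exist because C has no portable literal for
# them, so float('inf') is emitted as Py_HUGE_VAL and nan as
# (Py_HUGE_VAL * 0).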
class BytesNode(ConstNode):
# A char* or bytes literal
#
# value BytesLiteral
is_string_literal = True
# start off as Python 'bytes' to support len() in O(1)
type = bytes_type
def calculate_constant_result(self):
self.constant_result = self.value
def as_sliced_node(self, start, stop, step=None):
value = StringEncoding.BytesLiteral(self.value[start:stop:step])
value.encoding = self.value.encoding
return BytesNode(
self.pos, value=value, constant_result=value)
def compile_time_value(self, denv):
return self.value
def analyse_as_type(self, env):
type = PyrexTypes.parse_basic_type(self.value)
if type is not None:
return type
from TreeFragment import TreeFragment
pos = (self.pos[0], self.pos[1], self.pos[2]-7)
declaration = TreeFragment(u"sizeof(%s)" % self.value, name=pos[0].filename, initial_pos=pos)
sizeof_node = declaration.root.stats[0].expr
sizeof_node = sizeof_node.analyse_types(env)
if isinstance(sizeof_node, SizeofTypeNode):
return sizeof_node.arg_type
def can_coerce_to_char_literal(self):
return len(self.value) == 1
def coerce_to_boolean(self, env):
# This is special because testing a C char* for truth directly
# would yield the wrong result.
bool_value = bool(self.value)
return BoolNode(self.pos, value=bool_value, constant_result=bool_value)
def coerce_to(self, dst_type, env):
if self.type == dst_type:
return self
if dst_type.is_int:
if not self.can_coerce_to_char_literal():
error(self.pos, "Only single-character string literals can be coerced into ints.")
return self
if dst_type.is_unicode_char:
error(self.pos, "Bytes literals cannot coerce to Py_UNICODE/Py_UCS4, use a unicode literal instead.")
return self
return CharNode(self.pos, value=self.value,
constant_result=ord(self.value))
node = BytesNode(self.pos, value=self.value,
constant_result=self.constant_result)
if dst_type.is_pyobject:
if dst_type in (py_object_type, Builtin.bytes_type):
node.type = Builtin.bytes_type
else:
self.check_for_coercion_error(dst_type, env, fail=True)
return node
elif dst_type == PyrexTypes.c_char_ptr_type:
node.type = dst_type
return node
elif dst_type == PyrexTypes.c_uchar_ptr_type:
node.type = PyrexTypes.c_char_ptr_type
return CastNode(node, PyrexTypes.c_uchar_ptr_type)
elif dst_type.assignable_from(PyrexTypes.c_char_ptr_type):
node.type = dst_type
return node
# We still need to perform normal coerce_to processing on the
# result, because we might be coercing to an extension type,
# in which case a type test node will be needed.
return ConstNode.coerce_to(node, dst_type, env)
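# Coercion summary (illustrative): a literal like b'x' can end up as a C
# char ('x'), a char* string constant, or a Python bytes object depending
# on dst_type; coercing a multi-character literal to an int reports the
# single-character error above.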
def generate_evaluation_code(self, code):
if self.type.is_pyobject:
self.result_code = code.get_py_string_const(self.value)
else:
self.result_code = code.get_string_const(self.value)
def get_constant_c_result_code(self):
return None # FIXME
def calculate_result_code(self):
return self.result_code
class UnicodeNode(ConstNode):
# A Py_UNICODE* or unicode literal
#
# value EncodedString
# bytes_value BytesLiteral the literal parsed as bytes string
# ('-3' unicode literals only)
is_string_literal = True
bytes_value = None
type = unicode_type
def calculate_constant_result(self):
self.constant_result = self.value
def as_sliced_node(self, start, stop, step=None):
if StringEncoding.string_contains_surrogates(self.value[:stop]):
# this is unsafe as it may give different results
# in different runtimes
return None
value = StringEncoding.EncodedString(self.value[start:stop:step])
value.encoding = self.value.encoding
if self.bytes_value is not None:
bytes_value = StringEncoding.BytesLiteral(
self.bytes_value[start:stop:step])
bytes_value.encoding = self.bytes_value.encoding
else:
bytes_value = None
return UnicodeNode(
self.pos, value=value, bytes_value=bytes_value,
constant_result=value)
def coerce_to(self, dst_type, env):
if dst_type is self.type:
pass
elif dst_type.is_unicode_char:
if not self.can_coerce_to_char_literal():
error(self.pos,
"Only single-character Unicode string literals or "
"surrogate pairs can be coerced into Py_UCS4/Py_UNICODE.")
return self
int_value = ord(self.value)
return IntNode(self.pos, type=dst_type, value=str(int_value),
constant_result=int_value)
elif not dst_type.is_pyobject:
if dst_type.is_string and self.bytes_value is not None:
# special case: '-3' enforced unicode literal used in a
# C char* context
return BytesNode(self.pos, value=self.bytes_value
).coerce_to(dst_type, env)
if dst_type.is_pyunicode_ptr:
node = UnicodeNode(self.pos, value=self.value)
node.type = dst_type
return node
error(self.pos,
"Unicode literals do not support coercion to C types other "
"than Py_UNICODE/Py_UCS4 (for characters) or Py_UNICODE* "
"(for strings).")
elif dst_type not in (py_object_type, Builtin.basestring_type):
self.check_for_coercion_error(dst_type, env, fail=True)
return self
def can_coerce_to_char_literal(self):
return len(self.value) == 1
## or (len(self.value) == 2
## and (0xD800 <= self.value[0] <= 0xDBFF)
## and (0xDC00 <= self.value[1] <= 0xDFFF))
def coerce_to_boolean(self, env):
bool_value = bool(self.value)
return BoolNode(self.pos, value=bool_value, constant_result=bool_value)
def contains_surrogates(self):
return StringEncoding.string_contains_surrogates(self.value)
def generate_evaluation_code(self, code):
if self.type.is_pyobject:
if self.contains_surrogates():
# surrogates are not really portable and cannot be
# decoded by the UTF-8 codec in Py3.3
self.result_code = code.get_py_const(py_object_type, 'ustring')
data_cname = code.get_pyunicode_ptr_const(self.value)
code = code.get_cached_constants_writer()
code.mark_pos(self.pos)
code.putln(
"%s = PyUnicode_FromUnicode(%s, (sizeof(%s) / sizeof(Py_UNICODE))-1); %s" % (
self.result_code,
data_cname,
data_cname,
code.error_goto_if_null(self.result_code, self.pos)))
code.putln("#if CYTHON_PEP393_ENABLED")
code.put_error_if_neg(
self.pos, "PyUnicode_READY(%s)" % self.result_code)
code.putln("#endif")
else:
self.result_code = code.get_py_string_const(self.value)
else:
self.result_code = code.get_pyunicode_ptr_const(self.value)
def calculate_result_code(self):
return self.result_code
def compile_time_value(self, env):
return self.value
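# Note (descriptive): the surrogate branch in generate_evaluation_code()
# builds the string from a Py_UNICODE buffer at module init instead of a
# cached UTF-8 constant, since, as noted above, surrogates cannot be
# decoded reliably by the UTF-8 codec.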
class StringNode(PyConstNode):
# A Python str object, i.e. a byte string in Python 2.x and a
# unicode string in Python 3.x
#
# value BytesLiteral (or EncodedString with ASCII content)
# unicode_value EncodedString or None
# is_identifier boolean
type = str_type
is_string_literal = True
is_identifier = None
unicode_value = None
def calculate_constant_result(self):
if self.unicode_value is not None:
# only the Unicode value is portable across Py2/3
self.constant_result = self.unicode_value
def as_sliced_node(self, start, stop, step=None):
value = type(self.value)(self.value[start:stop:step])
value.encoding = self.value.encoding
if self.unicode_value is not None:
if StringEncoding.string_contains_surrogates(self.unicode_value[:stop]):
# this is unsafe as it may give different results in different runtimes
return None
unicode_value = StringEncoding.EncodedString(
self.unicode_value[start:stop:step])
else:
unicode_value = None
return StringNode(
self.pos, value=value, unicode_value=unicode_value,
constant_result=value, is_identifier=self.is_identifier)
def coerce_to(self, dst_type, env):
if dst_type is not py_object_type and not str_type.subtype_of(dst_type):
# if dst_type is Builtin.bytes_type:
# # special case: bytes = 'str literal'
# return BytesNode(self.pos, value=self.value)
if not dst_type.is_pyobject:
return BytesNode(self.pos, value=self.value).coerce_to(dst_type, env)
if dst_type is not Builtin.basestring_type:
self.check_for_coercion_error(dst_type, env, fail=True)
return self
def can_coerce_to_char_literal(self):
return not self.is_identifier and len(self.value) == 1
def generate_evaluation_code(self, code):
self.result_code = code.get_py_string_const(
self.value, identifier=self.is_identifier, is_str=True,
unicode_value=self.unicode_value)
def get_constant_c_result_code(self):
return None
def calculate_result_code(self):
return self.result_code
def compile_time_value(self, env):
return self.value
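# Note (descriptive): StringNode models the version-dependent 'str' type,
# which is a byte string under Python 2 but unicode under Python 3; that is
# why only unicode_value feeds constant_result above.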
class IdentifierStringNode(StringNode):
# A special str value that represents an identifier (bytes in Py2,
# unicode in Py3).
is_identifier = True
class ImagNode(AtomicExprNode):
# Imaginary number literal
#
# value float imaginary part
type = PyrexTypes.c_double_complex_type
def calculate_constant_result(self):
self.constant_result = complex(0.0, self.value)
def compile_time_value(self, denv):
return complex(0.0, self.value)
def analyse_types(self, env):
self.type.create_declaration_utility_code(env)
return self
def may_be_none(self):
return False
def coerce_to(self, dst_type, env):
if self.type is dst_type:
return self
node = ImagNode(self.pos, value=self.value)
if dst_type.is_pyobject:
node.is_temp = 1
node.type = PyrexTypes.py_object_type
# We still need to perform normal coerce_to processing on the
# result, because we might be coercing to an extension type,
# in which case a type test node will be needed.
return AtomicExprNode.coerce_to(node, dst_type, env)
gil_message = "Constructing complex number"
def calculate_result_code(self):
if self.type.is_pyobject:
return self.result()
else:
return "%s(0, %r)" % (self.type.from_parts, float(self.value))
def generate_result_code(self, code):
if self.type.is_pyobject:
code.putln(
"%s = PyComplex_FromDoubles(0.0, %r); %s" % (
self.result(),
float(self.value),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class NewExprNode(AtomicExprNode):
# C++ new statement
#
# cppclass node c++ class to create
type = None
def infer_type(self, env):
type = self.cppclass.analyse_as_type(env)
if type is None or not type.is_cpp_class:
error(self.pos, "new operator can only be applied to a C++ class")
self.type = error_type
return
self.cpp_check(env)
constructor = type.scope.lookup(u'<init>')
if constructor is None:
func_type = PyrexTypes.CFuncType(type, [], exception_check='+')
type.scope.declare_cfunction(u'<init>', func_type, self.pos)
constructor = type.scope.lookup(u'<init>')
self.class_type = type
self.entry = constructor
self.type = constructor.type
return self.type
def analyse_types(self, env):
if self.type is None:
self.infer_type(env)
return self
def may_be_none(self):
return False
def generate_result_code(self, code):
pass
def calculate_result_code(self):
return "new " + self.class_type.declaration_code("")
class NameNode(AtomicExprNode):
# Reference to a local or global variable name.
#
# name string Python name of the variable
# entry Entry Symbol table entry
# type_entry Entry For extension type names, the original type entry
# cf_is_null boolean Is uninitialized before this node
# cf_maybe_null boolean Maybe uninitialized before this node
# allow_null boolean Don't raise UnboundLocalError
# nogil boolean Whether it is used in a nogil context
is_name = True
is_cython_module = False
cython_attribute = None
lhs_of_first_assignment = False # TODO: remove me
is_used_as_rvalue = 0
entry = None
type_entry = None
cf_maybe_null = True
cf_is_null = False
allow_null = False
nogil = False
inferred_type = None
def as_cython_attribute(self):
return self.cython_attribute
def type_dependencies(self, env):
if self.entry is None:
self.entry = env.lookup(self.name)
if self.entry is not None and self.entry.type.is_unspecified:
return (self,)
else:
return ()
def infer_type(self, env):
if self.entry is None:
self.entry = env.lookup(self.name)
if self.entry is None or self.entry.type is unspecified_type:
if self.inferred_type is not None:
return self.inferred_type
return py_object_type
elif (self.entry.type.is_extension_type or self.entry.type.is_builtin_type) and \
self.name == self.entry.type.name:
# Unfortunately the type attribute of type objects
# is used for the pointer to the type they represent.
return type_type
elif self.entry.type.is_cfunction:
if self.entry.scope.is_builtin_scope:
# special case: optimised builtin functions must be treated as Python objects
return py_object_type
else:
# special case: referring to a C function must return its pointer
return PyrexTypes.CPtrType(self.entry.type)
else:
# If entry is inferred as pyobject it's safe to use local
# NameNode's inferred_type.
if self.entry.type.is_pyobject and self.inferred_type:
# An inferred C integer type may overflow at runtime, so only
# trust it if the entry cannot overflow.
if not (self.inferred_type.is_int and self.entry.might_overflow):
return self.inferred_type
return self.entry.type
def compile_time_value(self, denv):
try:
return denv.lookup(self.name)
except KeyError:
error(self.pos, "Compile-time name '%s' not defined" % self.name)
def get_constant_c_result_code(self):
if not self.entry or self.entry.type.is_pyobject:
return None
return self.entry.cname
def coerce_to(self, dst_type, env):
# If coercing to a generic pyobject and this is a builtin
# C function with a Python equivalent, manufacture a NameNode
# referring to the Python builtin.
#print "NameNode.coerce_to:", self.name, dst_type ###
if dst_type is py_object_type:
entry = self.entry
if entry and entry.is_cfunction:
var_entry = entry.as_variable
if var_entry:
if var_entry.is_builtin and var_entry.is_const:
var_entry = env.declare_builtin(var_entry.name, self.pos)
node = NameNode(self.pos, name = self.name)
node.entry = var_entry
node.analyse_rvalue_entry(env)
return node
return super(NameNode, self).coerce_to(dst_type, env)
def analyse_as_module(self, env):
# Try to interpret this as a reference to a cimported module.
# Returns the module scope, or None.
entry = self.entry
if not entry:
entry = env.lookup(self.name)
if entry and entry.as_module:
return entry.as_module
return None
def analyse_as_type(self, env):
if self.cython_attribute:
type = PyrexTypes.parse_basic_type(self.cython_attribute)
else:
type = PyrexTypes.parse_basic_type(self.name)
if type:
return type
entry = self.entry
if not entry:
entry = env.lookup(self.name)
if entry and entry.is_type:
return entry.type
else:
return None
def analyse_as_extension_type(self, env):
# Try to interpret this as a reference to an extension type.
# Returns the extension type, or None.
entry = self.entry
if not entry:
entry = env.lookup(self.name)
if entry and entry.is_type:
if entry.type.is_extension_type or entry.type.is_builtin_type:
return entry.type
return None
def analyse_target_declaration(self, env):
if not self.entry:
self.entry = env.lookup_here(self.name)
if not self.entry:
if env.directives['warn.undeclared']:
warning(self.pos, "implicit declaration of '%s'" % self.name, 1)
if env.directives['infer_types'] != False:
type = unspecified_type
else:
type = py_object_type
self.entry = env.declare_var(self.name, type, self.pos)
if self.entry.is_declared_generic:
self.result_ctype = py_object_type
def analyse_types(self, env):
self.initialized_check = env.directives['initializedcheck']
if self.entry is None:
self.entry = env.lookup(self.name)
if not self.entry:
self.entry = env.declare_builtin(self.name, self.pos)
if not self.entry:
self.type = PyrexTypes.error_type
return self
entry = self.entry
if entry:
entry.used = 1
if entry.type.is_buffer:
import Buffer
Buffer.used_buffer_aux_vars(entry)
self.analyse_rvalue_entry(env)
return self
def analyse_target_types(self, env):
self.analyse_entry(env, is_target=True)
if (not self.is_lvalue() and self.entry.is_cfunction and
self.entry.fused_cfunction and self.entry.as_variable):
# We need this for the fused 'def' TreeFragment
self.entry = self.entry.as_variable
self.type = self.entry.type
if self.type.is_const:
error(self.pos, "Assignment to const '%s'" % self.name)
if self.type.is_reference:
error(self.pos, "Assignment to reference '%s'" % self.name)
if not self.is_lvalue():
error(self.pos, "Assignment to non-lvalue '%s'"
% self.name)
self.type = PyrexTypes.error_type
self.entry.used = 1
if self.entry.type.is_buffer:
import Buffer
Buffer.used_buffer_aux_vars(self.entry)
return self
def analyse_rvalue_entry(self, env):
#print "NameNode.analyse_rvalue_entry:", self.name ###
#print "Entry:", self.entry.__dict__ ###
self.analyse_entry(env)
entry = self.entry
if entry.is_declared_generic:
self.result_ctype = py_object_type
if entry.is_pyglobal or entry.is_builtin:
if entry.is_builtin and entry.is_const:
self.is_temp = 0
else:
self.is_temp = 1
self.is_used_as_rvalue = 1
elif entry.type.is_memoryviewslice:
self.is_temp = False
self.is_used_as_rvalue = True
self.use_managed_ref = True
return self
def nogil_check(self, env):
self.nogil = True
if self.is_used_as_rvalue:
entry = self.entry
if entry.is_builtin:
if not entry.is_const: # cached builtins are ok
self.gil_error()
elif entry.is_pyglobal:
self.gil_error()
elif self.entry.type.is_memoryviewslice:
if self.cf_is_null or self.cf_maybe_null:
import MemoryView
MemoryView.err_if_nogil_initialized_check(self.pos, env)
gil_message = "Accessing Python global or builtin"
def analyse_entry(self, env, is_target=False):
#print "NameNode.analyse_entry:", self.name ###
self.check_identifier_kind()
entry = self.entry
type = entry.type
if (not is_target and type.is_pyobject and self.inferred_type and
self.inferred_type.is_builtin_type):
# assume that type inference is smarter than the static entry
type = self.inferred_type
self.type = type
def check_identifier_kind(self):
# Check that this is an appropriate kind of name for use in an
# expression. Also finds the variable entry associated with
# an extension type.
entry = self.entry
if entry.is_type and entry.type.is_extension_type:
self.type_entry = entry
if not (entry.is_const or entry.is_variable
or entry.is_builtin or entry.is_cfunction
or entry.is_cpp_class):
if self.entry.as_variable:
self.entry = self.entry.as_variable
else:
error(self.pos,
"'%s' is not a constant, variable or function identifier" % self.name)
def is_simple(self):
# If it's not a C variable, it'll be in a temp.
return 1
def may_be_none(self):
if self.cf_state and self.type and (self.type.is_pyobject or
self.type.is_memoryviewslice):
# guard against infinite recursion on self-dependencies
if getattr(self, '_none_checking', False):
# self-dependency - either this node receives a None
# value from *another* node, or it cannot reference
# None at this point => safe to assume "not None"
return False
self._none_checking = True
# evaluate control flow state to see if there were any
# potential None values assigned to the node so far
may_be_none = False
for assignment in self.cf_state:
if assignment.rhs.may_be_none():
may_be_none = True
break
del self._none_checking
return may_be_none
return super(NameNode, self).may_be_none()
def nonlocally_immutable(self):
if ExprNode.nonlocally_immutable(self):
return True
entry = self.entry
if not entry or entry.in_closure:
return False
return entry.is_local or entry.is_arg or entry.is_builtin or entry.is_readonly
def calculate_target_results(self, env):
pass
def check_const(self):
entry = self.entry
if entry is not None and not (entry.is_const or entry.is_cfunction or entry.is_builtin):
self.not_const()
return False
return True
def check_const_addr(self):
entry = self.entry
if not (entry.is_cglobal or entry.is_cfunction or entry.is_builtin):
self.addr_not_const()
return False
return True
def is_lvalue(self):
return self.entry.is_variable and \
not self.entry.type.is_array and \
not self.entry.is_readonly
def is_addressable(self):
return self.entry.is_variable and not self.type.is_memoryviewslice
def is_ephemeral(self):
# Name nodes are never ephemeral, even if the
# result is in a temporary.
return 0
def calculate_result_code(self):
entry = self.entry
if not entry:
return "<error>" # There was an error earlier
return entry.cname
def generate_result_code(self, code):
assert hasattr(self, 'entry')
entry = self.entry
if entry is None:
return # There was an error earlier
if entry.is_builtin and entry.is_const:
return # Lookup already cached
elif entry.is_pyclass_attr:
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
if entry.is_builtin:
namespace = Naming.builtins_cname
else: # entry.is_pyglobal
namespace = entry.scope.namespace_cname
if not self.cf_is_null:
code.putln(
'%s = PyObject_GetItem(%s, %s);' % (
self.result(),
namespace,
interned_cname))
code.putln('if (unlikely(!%s)) {' % self.result())
code.putln('PyErr_Clear();')
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
code.putln(
'%s = __Pyx_GetModuleGlobalName(%s);' % (
self.result(),
interned_cname))
if not self.cf_is_null:
code.putln("}")
code.putln(code.error_goto_if_null(self.result(), self.pos))
code.put_gotref(self.py_result())
elif entry.is_builtin:
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetBuiltinName", "ObjectHandling.c"))
code.putln(
'%s = __Pyx_GetBuiltinName(%s); %s' % (
self.result(),
interned_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif entry.is_pyglobal:
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
if entry.scope.is_module_scope:
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
code.putln(
'%s = __Pyx_GetModuleGlobalName(%s); %s' % (
self.result(),
interned_cname,
code.error_goto_if_null(self.result(), self.pos)))
else:
# FIXME: is_pyglobal is also used for class namespace
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c"))
code.putln(
'%s = __Pyx_GetNameInClass(%s, %s); %s' % (
self.result(),
entry.scope.namespace_cname,
interned_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif entry.is_local or entry.in_closure or entry.from_closure or entry.type.is_memoryviewslice:
# Raise UnboundLocalError for objects and memoryviewslices
raise_unbound = (
(self.cf_maybe_null or self.cf_is_null) and not self.allow_null)
null_code = entry.type.check_for_null_code(entry.cname)
memslice_check = entry.type.is_memoryviewslice and self.initialized_check
if null_code and raise_unbound and (entry.type.is_pyobject or memslice_check):
code.put_error_if_unbound(self.pos, entry, self.in_nogil_context)
def generate_assignment_code(self, rhs, code):
#print "NameNode.generate_assignment_code:", self.name ###
entry = self.entry
if entry is None:
return # There was an error earlier
if (self.entry.type.is_ptr and isinstance(rhs, ListNode)
and not self.lhs_of_first_assignment and not rhs.in_module_scope):
error(self.pos, "Literal list must be assigned to pointer at time of declaration")
# is_pyglobal seems to be True for module-level globals only.
# We use this to access class->tp_dict if necessary.
if entry.is_pyglobal:
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
namespace = self.entry.scope.namespace_cname
if entry.is_member:
# if the entry is a member we have to cheat: SetAttr does not work
# on types, so we create a descriptor which is then added to tp_dict
setter = 'PyDict_SetItem'
namespace = '%s->tp_dict' % namespace
elif entry.scope.is_module_scope:
setter = 'PyDict_SetItem'
namespace = Naming.moddict_cname
elif entry.is_pyclass_attr:
setter = 'PyObject_SetItem'
else:
assert False, repr(entry)
code.put_error_if_neg(
self.pos,
'%s(%s, %s, %s)' % (
setter,
namespace,
interned_cname,
rhs.py_result()))
if debug_disposal_code:
print("NameNode.generate_assignment_code:")
print("...generating disposal code for %s" % rhs)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
if entry.is_member:
# in Py2.6+, we need to invalidate the method cache
code.putln("PyType_Modified(%s);" %
entry.scope.parent_type.typeptr_cname)
else:
if self.type.is_memoryviewslice:
self.generate_acquire_memoryviewslice(rhs, code)
elif self.type.is_buffer:
# Generate code for doing the buffer release/acquisition.
# This might raise an exception in which case the assignment (done
# below) will not happen.
#
            # The reason this is not in a typetest-like node is that the
            # variables to which the acquired buffer info is stored are
            # allocated per entry and coupled with it.
self.generate_acquire_buffer(rhs, code)
assigned = False
if self.type.is_pyobject:
#print "NameNode.generate_assignment_code: to", self.name ###
#print "...from", rhs ###
#print "...LHS type", self.type, "ctype", self.ctype() ###
#print "...RHS type", rhs.type, "ctype", rhs.ctype() ###
if self.use_managed_ref:
rhs.make_owned_reference(code)
is_external_ref = entry.is_cglobal or self.entry.in_closure or self.entry.from_closure
if is_external_ref:
if not self.cf_is_null:
if self.cf_maybe_null:
code.put_xgotref(self.py_result())
else:
code.put_gotref(self.py_result())
assigned = True
if entry.is_cglobal:
code.put_decref_set(
self.result(), rhs.result_as(self.ctype()))
else:
if not self.cf_is_null:
if self.cf_maybe_null:
code.put_xdecref_set(
self.result(), rhs.result_as(self.ctype()))
else:
code.put_decref_set(
self.result(), rhs.result_as(self.ctype()))
else:
assigned = False
if is_external_ref:
code.put_giveref(rhs.py_result())
if not self.type.is_memoryviewslice:
if not assigned:
code.putln('%s = %s;' % (
self.result(), rhs.result_as(self.ctype())))
if debug_disposal_code:
print("NameNode.generate_assignment_code:")
print("...generating post-assignment code for %s" % rhs)
rhs.generate_post_assignment_code(code)
elif rhs.result_in_temp():
rhs.generate_post_assignment_code(code)
rhs.free_temps(code)
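        # For a reference-counted assignment to an external location (e.g. a
        # closure variable), the code generated above roughly follows this
        # pattern (a hedged sketch; the cnames are illustrative only):
        #
        #   __Pyx_GOTREF(__pyx_cur_scope->__pyx_v_x);     /* re-own the old ref */
        #   __Pyx_DECREF_SET(__pyx_cur_scope->__pyx_v_x, __pyx_t_1);
        #   __Pyx_GIVEREF(__pyx_t_1);                     /* hand the new ref over */
        #
        # i.e. the old value is only released once the new reference is owned,
        # so an error in between cannot leave a dangling reference behind.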
def generate_acquire_memoryviewslice(self, rhs, code):
"""
        Slices, coercions from objects, return values etc. are new references.
        In the case of a plain "dst = src" assignment, we only have a borrowed reference.
"""
import MemoryView
MemoryView.put_acquire_memoryviewslice(
lhs_cname=self.result(),
lhs_type=self.type,
lhs_pos=self.pos,
rhs=rhs,
code=code,
have_gil=not self.in_nogil_context,
first_assignment=self.cf_is_null)
def generate_acquire_buffer(self, rhs, code):
        # rhstmp is only used when the rhs is a complicated expression that yields
        # the object, to avoid repeating the same C expression for every reference
        # to the rhs. It does NOT hold a reference.
pretty_rhs = isinstance(rhs, NameNode) or rhs.is_temp
if pretty_rhs:
rhstmp = rhs.result_as(self.ctype())
else:
rhstmp = code.funcstate.allocate_temp(self.entry.type, manage_ref=False)
code.putln('%s = %s;' % (rhstmp, rhs.result_as(self.ctype())))
import Buffer
Buffer.put_assign_to_buffer(self.result(), rhstmp, self.entry,
is_initialized=not self.lhs_of_first_assignment,
pos=self.pos, code=code)
if not pretty_rhs:
code.putln("%s = 0;" % rhstmp)
code.funcstate.release_temp(rhstmp)
def generate_deletion_code(self, code, ignore_nonexisting=False):
if self.entry is None:
return # There was an error earlier
elif self.entry.is_pyclass_attr:
namespace = self.entry.scope.namespace_cname
interned_cname = code.intern_identifier(self.entry.name)
if ignore_nonexisting:
key_error_code = 'PyErr_Clear(); else'
else:
# minor hack: fake a NameError on KeyError
key_error_code = (
'{ PyErr_Clear(); PyErr_Format(PyExc_NameError, "name \'%%s\' is not defined", "%s"); }' %
self.entry.name)
code.putln(
'if (unlikely(PyObject_DelItem(%s, %s) < 0)) {'
' if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) %s'
' %s '
'}' % (namespace, interned_cname,
key_error_code,
code.error_goto(self.pos)))
elif self.entry.is_pyglobal:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
interned_cname = code.intern_identifier(self.entry.name)
del_code = '__Pyx_PyObject_DelAttrStr(%s, %s)' % (
Naming.module_cname, interned_cname)
if ignore_nonexisting:
code.putln('if (unlikely(%s < 0)) { if (likely(PyErr_ExceptionMatches(PyExc_AttributeError))) PyErr_Clear(); else %s }' % (
del_code,
code.error_goto(self.pos)))
else:
code.put_error_if_neg(self.pos, del_code)
elif self.entry.type.is_pyobject or self.entry.type.is_memoryviewslice:
if not self.cf_is_null:
if self.cf_maybe_null and not ignore_nonexisting:
code.put_error_if_unbound(self.pos, self.entry)
if self.entry.type.is_pyobject:
if self.entry.in_closure:
# generator
if ignore_nonexisting and self.cf_maybe_null:
code.put_xgotref(self.result())
else:
code.put_gotref(self.result())
if ignore_nonexisting and self.cf_maybe_null:
code.put_xdecref(self.result(), self.ctype())
else:
code.put_decref(self.result(), self.ctype())
code.putln('%s = NULL;' % self.result())
else:
code.put_xdecref_memoryviewslice(self.entry.cname,
have_gil=not self.nogil)
else:
error(self.pos, "Deletion of C names not supported")
def annotate(self, code):
if hasattr(self, 'is_called') and self.is_called:
pos = (self.pos[0], self.pos[1], self.pos[2] - len(self.name) - 1)
if self.type.is_pyobject:
style, text = 'py_call', 'python function (%s)'
else:
style, text = 'c_call', 'c function (%s)'
code.annotate(pos, AnnotationItem(style, text % self.type, size=len(self.name)))
class BackquoteNode(ExprNode):
# `expr`
#
# arg ExprNode
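    #
    # Example (Python 2 syntax only): the expression `x` compiles to a
    # PyObject_Repr() call, so `42` evaluates to the string "42".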
type = py_object_type
subexprs = ['arg']
def analyse_types(self, env):
self.arg = self.arg.analyse_types(env)
self.arg = self.arg.coerce_to_pyobject(env)
self.is_temp = 1
return self
gil_message = "Backquote expression"
def calculate_constant_result(self):
self.constant_result = repr(self.arg.constant_result)
def generate_result_code(self, code):
code.putln(
"%s = PyObject_Repr(%s); %s" % (
self.result(),
self.arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class ImportNode(ExprNode):
# Used as part of import statement implementation.
# Implements result =
# __import__(module_name, globals(), None, name_list, level)
#
# module_name StringNode dotted name of module. Empty module
# name means importing the parent package according
# to level
# name_list ListNode or None list of names to be imported
# level int relative import level:
# -1: attempt both relative import and absolute import;
# 0: absolute import;
# >0: the number of parent directories to search
# relative to the current module.
# None: decide the level according to language level and
# directives
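    #
    # For illustration (hedged; the exact level also depends on directives):
    #   import a.b          ->  module_name "a.b", name_list None
    #   from a import x, y  ->  module_name "a",   name_list ["x", "y"]
    #   from . import x     ->  module_name "",    name_list ["x"], level 1
    #   from ..pkg import x ->  module_name "pkg", name_list ["x"], level 2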
type = py_object_type
subexprs = ['module_name', 'name_list']
def analyse_types(self, env):
if self.level is None:
if (env.directives['py2_import'] or
Future.absolute_import not in env.global_scope().context.future_directives):
self.level = -1
else:
self.level = 0
module_name = self.module_name.analyse_types(env)
self.module_name = module_name.coerce_to_pyobject(env)
if self.name_list:
name_list = self.name_list.analyse_types(env)
self.name_list = name_list.coerce_to_pyobject(env)
self.is_temp = 1
env.use_utility_code(UtilityCode.load_cached("Import", "ImportExport.c"))
return self
gil_message = "Python import"
def generate_result_code(self, code):
if self.name_list:
name_list_code = self.name_list.py_result()
else:
name_list_code = "0"
code.putln(
"%s = __Pyx_Import(%s, %s, %d); %s" % (
self.result(),
self.module_name.py_result(),
name_list_code,
self.level,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class IteratorNode(ExprNode):
# Used as part of for statement implementation.
#
# Implements result = iter(sequence)
#
# sequence ExprNode
type = py_object_type
iter_func_ptr = None
counter_cname = None
cpp_iterator_cname = None
reversed = False # currently only used for list/tuple types (see Optimize.py)
subexprs = ['sequence']
def analyse_types(self, env):
self.sequence = self.sequence.analyse_types(env)
if (self.sequence.type.is_array or self.sequence.type.is_ptr) and \
not self.sequence.type.is_string:
# C array iteration will be transformed later on
self.type = self.sequence.type
elif self.sequence.type.is_cpp_class:
self.analyse_cpp_types(env)
else:
self.sequence = self.sequence.coerce_to_pyobject(env)
if self.sequence.type is list_type or \
self.sequence.type is tuple_type:
self.sequence = self.sequence.as_none_safe_node("'NoneType' object is not iterable")
self.is_temp = 1
return self
gil_message = "Iterating over Python object"
_func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
]))
def type_dependencies(self, env):
return self.sequence.type_dependencies(env)
def infer_type(self, env):
sequence_type = self.sequence.infer_type(env)
if sequence_type.is_array or sequence_type.is_ptr:
return sequence_type
elif sequence_type.is_cpp_class:
begin = sequence_type.scope.lookup("begin")
if begin is not None:
return begin.type.return_type
elif sequence_type.is_pyobject:
return sequence_type
return py_object_type
def analyse_cpp_types(self, env):
sequence_type = self.sequence.type
if sequence_type.is_ptr:
sequence_type = sequence_type.base_type
begin = sequence_type.scope.lookup("begin")
end = sequence_type.scope.lookup("end")
if (begin is None
or not begin.type.is_cfunction
or begin.type.args):
error(self.pos, "missing begin() on %s" % self.sequence.type)
self.type = error_type
return
if (end is None
or not end.type.is_cfunction
or end.type.args):
error(self.pos, "missing end() on %s" % self.sequence.type)
self.type = error_type
return
iter_type = begin.type.return_type
if iter_type.is_cpp_class:
if env.lookup_operator_for_types(
self.pos,
"!=",
[iter_type, end.type.return_type]) is None:
error(self.pos, "missing operator!= on result of begin() on %s" % self.sequence.type)
self.type = error_type
return
if env.lookup_operator_for_types(self.pos, '++', [iter_type]) is None:
error(self.pos, "missing operator++ on result of begin() on %s" % self.sequence.type)
self.type = error_type
return
if env.lookup_operator_for_types(self.pos, '*', [iter_type]) is None:
error(self.pos, "missing operator* on result of begin() on %s" % self.sequence.type)
self.type = error_type
return
self.type = iter_type
elif iter_type.is_ptr:
if not (iter_type == end.type.return_type):
error(self.pos, "incompatible types for begin() and end()")
self.type = iter_type
else:
error(self.pos, "result type of begin() on %s must be a C++ class or pointer" % self.sequence.type)
self.type = error_type
return
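    # A C++ container is iterable here if it provides the protocol checked
    # above; a hedged sketch of a matching declaration ("vec.h" and IntVec
    # are hypothetical names):
    #
    #   cdef extern from "vec.h":
    #       cppclass IntVec:
    #           cppclass iterator:
    #               int operator*()
    #               iterator operator++()
    #               bint operator!=(iterator)
    #           iterator begin()
    #           iterator end()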
def generate_result_code(self, code):
sequence_type = self.sequence.type
if sequence_type.is_cpp_class:
if self.sequence.is_name:
# safe: C++ won't allow you to reassign to class references
begin_func = "%s.begin" % self.sequence.result()
else:
sequence_type = PyrexTypes.c_ptr_type(sequence_type)
self.cpp_iterator_cname = code.funcstate.allocate_temp(sequence_type, manage_ref=False)
code.putln("%s = &%s;" % (self.cpp_iterator_cname, self.sequence.result()))
begin_func = "%s->begin" % self.cpp_iterator_cname
# TODO: Limit scope.
code.putln("%s = %s();" % (self.result(), begin_func))
return
if sequence_type.is_array or sequence_type.is_ptr:
raise InternalError("for in carray slice not transformed")
is_builtin_sequence = sequence_type is list_type or \
sequence_type is tuple_type
if not is_builtin_sequence:
# reversed() not currently optimised (see Optimize.py)
assert not self.reversed, "internal error: reversed() only implemented for list/tuple objects"
self.may_be_a_sequence = not sequence_type.is_builtin_type
if self.may_be_a_sequence:
code.putln(
"if (PyList_CheckExact(%s) || PyTuple_CheckExact(%s)) {" % (
self.sequence.py_result(),
self.sequence.py_result()))
if is_builtin_sequence or self.may_be_a_sequence:
self.counter_cname = code.funcstate.allocate_temp(
PyrexTypes.c_py_ssize_t_type, manage_ref=False)
if self.reversed:
if sequence_type is list_type:
init_value = 'PyList_GET_SIZE(%s) - 1' % self.result()
else:
init_value = 'PyTuple_GET_SIZE(%s) - 1' % self.result()
else:
init_value = '0'
code.putln(
"%s = %s; __Pyx_INCREF(%s); %s = %s;" % (
self.result(),
self.sequence.py_result(),
self.result(),
self.counter_cname,
init_value
))
if not is_builtin_sequence:
self.iter_func_ptr = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
if self.may_be_a_sequence:
code.putln("%s = NULL;" % self.iter_func_ptr)
code.putln("} else {")
code.put("%s = -1; " % self.counter_cname)
code.putln("%s = PyObject_GetIter(%s); %s" % (
self.result(),
self.sequence.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
code.putln("%s = Py_TYPE(%s)->tp_iternext;" % (self.iter_func_ptr, self.py_result()))
if self.may_be_a_sequence:
code.putln("}")
def generate_next_sequence_item(self, test_name, result_name, code):
assert self.counter_cname, "internal error: counter_cname temp not prepared"
final_size = 'Py%s_GET_SIZE(%s)' % (test_name, self.py_result())
if self.sequence.is_sequence_constructor:
item_count = len(self.sequence.args)
if self.sequence.mult_factor is None:
final_size = item_count
elif isinstance(self.sequence.mult_factor.constant_result, (int, long)):
final_size = item_count * self.sequence.mult_factor.constant_result
code.putln("if (%s >= %s) break;" % (self.counter_cname, final_size))
if self.reversed:
inc_dec = '--'
else:
inc_dec = '++'
code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
code.putln(
"%s = Py%s_GET_ITEM(%s, %s); __Pyx_INCREF(%s); %s%s; %s" % (
result_name,
test_name,
self.py_result(),
self.counter_cname,
result_name,
self.counter_cname,
inc_dec,
                # reference the error label here to avoid C compiler warnings
                # about an unused label when only the #else branch below uses it
code.error_goto_if_neg('0', self.pos)
))
code.putln("#else")
code.putln(
"%s = PySequence_ITEM(%s, %s); %s%s; %s" % (
result_name,
self.py_result(),
self.counter_cname,
self.counter_cname,
inc_dec,
code.error_goto_if_null(result_name, self.pos)))
code.putln("#endif")
def generate_iter_next_result_code(self, result_name, code):
sequence_type = self.sequence.type
if self.reversed:
code.putln("if (%s < 0) break;" % self.counter_cname)
if sequence_type.is_cpp_class:
if self.cpp_iterator_cname:
end_func = "%s->end" % self.cpp_iterator_cname
else:
end_func = "%s.end" % self.sequence.result()
# TODO: Cache end() call?
code.putln("if (!(%s != %s())) break;" % (
self.result(),
end_func))
code.putln("%s = *%s;" % (
result_name,
self.result()))
code.putln("++%s;" % self.result())
return
elif sequence_type is list_type:
self.generate_next_sequence_item('List', result_name, code)
return
elif sequence_type is tuple_type:
self.generate_next_sequence_item('Tuple', result_name, code)
return
if self.may_be_a_sequence:
for test_name in ('List', 'Tuple'):
code.putln("if (!%s && Py%s_CheckExact(%s)) {" % (
self.iter_func_ptr, test_name, self.py_result()))
self.generate_next_sequence_item(test_name, result_name, code)
code.put("} else ")
code.putln("{")
code.putln(
"%s = %s(%s);" % (
result_name,
self.iter_func_ptr,
self.py_result()))
code.putln("if (unlikely(!%s)) {" % result_name)
code.putln("PyObject* exc_type = PyErr_Occurred();")
code.putln("if (exc_type) {")
code.putln("if (likely(exc_type == PyExc_StopIteration ||"
" PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();")
code.putln("else %s" % code.error_goto(self.pos))
code.putln("}")
code.putln("break;")
code.putln("}")
code.put_gotref(result_name)
code.putln("}")
def free_temps(self, code):
if self.counter_cname:
code.funcstate.release_temp(self.counter_cname)
if self.iter_func_ptr:
code.funcstate.release_temp(self.iter_func_ptr)
self.iter_func_ptr = None
if self.cpp_iterator_cname:
code.funcstate.release_temp(self.cpp_iterator_cname)
ExprNode.free_temps(self, code)
class NextNode(AtomicExprNode):
# Used as part of for statement implementation.
# Implements result = iterator.next()
# Created during analyse_types phase.
# The iterator is not owned by this node.
#
# iterator IteratorNode
def __init__(self, iterator):
AtomicExprNode.__init__(self, iterator.pos)
self.iterator = iterator
def type_dependencies(self, env):
return self.iterator.type_dependencies(env)
def infer_type(self, env, iterator_type = None):
if iterator_type is None:
iterator_type = self.iterator.infer_type(env)
if iterator_type.is_ptr or iterator_type.is_array:
return iterator_type.base_type
elif iterator_type.is_cpp_class:
item_type = env.lookup_operator_for_types(self.pos, "*", [iterator_type]).type.return_type
if item_type.is_reference:
item_type = item_type.ref_base_type
if item_type.is_const:
item_type = item_type.const_base_type
return item_type
else:
            # Avoid duplicating the complicated item type inference logic by
            # inferring the type of a fake indexing operation on the sequence.
fake_index_node = IndexNode(
self.pos,
base=self.iterator.sequence,
index=IntNode(self.pos, value='PY_SSIZE_T_MAX',
type=PyrexTypes.c_py_ssize_t_type))
return fake_index_node.infer_type(env)
def analyse_types(self, env):
self.type = self.infer_type(env, self.iterator.type)
self.is_temp = 1
return self
def generate_result_code(self, code):
self.iterator.generate_iter_next_result_code(self.result(), code)
class WithExitCallNode(ExprNode):
# The __exit__() call of a 'with' statement. Used in both the
# except and finally clauses.
# with_stat WithStatNode the surrounding 'with' statement
# args TupleNode or ResultStatNode the exception info tuple
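    #
    # Runtime behaviour implemented here, sketched in Python terms: for
    #
    #   with cm:
    #       body
    #
    # an exception raised in 'body' leads to a call roughly equivalent to
    # cm.__exit__(exc_type, exc_value, traceback); a true result suppresses
    # the exception, which is what the c_bint result of this node reports.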
subexprs = ['args']
test_if_run = True
def analyse_types(self, env):
self.args = self.args.analyse_types(env)
self.type = PyrexTypes.c_bint_type
self.is_temp = True
return self
def generate_evaluation_code(self, code):
if self.test_if_run:
# call only if it was not already called (and decref-cleared)
code.putln("if (%s) {" % self.with_stat.exit_var)
self.args.generate_evaluation_code(code)
result_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
code.mark_pos(self.pos)
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCall", "ObjectHandling.c"))
code.putln("%s = __Pyx_PyObject_Call(%s, %s, NULL);" % (
result_var,
self.with_stat.exit_var,
self.args.result()))
code.put_decref_clear(self.with_stat.exit_var, type=py_object_type)
self.args.generate_disposal_code(code)
self.args.free_temps(code)
code.putln(code.error_goto_if_null(result_var, self.pos))
code.put_gotref(result_var)
if self.result_is_used:
self.allocate_temp_result(code)
code.putln("%s = __Pyx_PyObject_IsTrue(%s);" % (self.result(), result_var))
code.put_decref_clear(result_var, type=py_object_type)
if self.result_is_used:
code.put_error_if_neg(self.pos, self.result())
code.funcstate.release_temp(result_var)
if self.test_if_run:
code.putln("}")
class ExcValueNode(AtomicExprNode):
# Node created during analyse_types phase
# of an ExceptClauseNode to fetch the current
# exception value.
type = py_object_type
def __init__(self, pos):
ExprNode.__init__(self, pos)
def set_var(self, var):
self.var = var
def calculate_result_code(self):
return self.var
def generate_result_code(self, code):
pass
def analyse_types(self, env):
return self
class TempNode(ExprNode):
# Node created during analyse_types phase
# of some nodes to hold a temporary value.
#
# Note: One must call "allocate" and "release" on
# the node during code generation to get/release the temp.
# This is because the temp result is often used outside of
# the regular cycle.
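    #
    # Hypothetical usage sketch from a code-generating node:
    #
    #   temp = TempNode(pos, some_type)
    #   temp.allocate(code)                  # before first use
    #   code.putln("%s = ...;" % temp.result())
    #   temp.release(code)                   # once the value is dead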
subexprs = []
def __init__(self, pos, type, env=None):
ExprNode.__init__(self, pos)
self.type = type
if type.is_pyobject:
self.result_ctype = py_object_type
self.is_temp = 1
def analyse_types(self, env):
return self
def analyse_target_declaration(self, env):
pass
def generate_result_code(self, code):
pass
def allocate(self, code):
self.temp_cname = code.funcstate.allocate_temp(self.type, manage_ref=True)
def release(self, code):
code.funcstate.release_temp(self.temp_cname)
self.temp_cname = None
def result(self):
try:
return self.temp_cname
except:
assert False, "Remember to call allocate/release on TempNode"
raise
# Do not participate in normal temp alloc/dealloc:
def allocate_temp_result(self, code):
pass
def release_temp_result(self, code):
pass
class PyTempNode(TempNode):
# TempNode holding a Python value.
def __init__(self, pos, env):
TempNode.__init__(self, pos, PyrexTypes.py_object_type, env)
class RawCNameExprNode(ExprNode):
subexprs = []
def __init__(self, pos, type=None, cname=None):
ExprNode.__init__(self, pos, type=type)
if cname is not None:
self.cname = cname
def analyse_types(self, env):
return self
def set_cname(self, cname):
self.cname = cname
def result(self):
return self.cname
def generate_result_code(self, code):
pass
#-------------------------------------------------------------------
#
# Parallel nodes (cython.parallel.threadsavailable / cython.parallel.threadid)
#
#-------------------------------------------------------------------
class ParallelThreadsAvailableNode(AtomicExprNode):
"""
Note: this is disabled and not a valid directive at this moment
Implements cython.parallel.threadsavailable(). If we are called from the
sequential part of the application, we need to call omp_get_max_threads(),
and in the parallel part we can just call omp_get_num_threads()
"""
type = PyrexTypes.c_int_type
def analyse_types(self, env):
self.is_temp = True
# env.add_include_file("omp.h")
return self
def generate_result_code(self, code):
code.putln("#ifdef _OPENMP")
code.putln("if (omp_in_parallel()) %s = omp_get_max_threads();" %
self.temp_code)
code.putln("else %s = omp_get_num_threads();" % self.temp_code)
code.putln("#else")
code.putln("%s = 1;" % self.temp_code)
code.putln("#endif")
def result(self):
return self.temp_code
class ParallelThreadIdNode(AtomicExprNode): #, Nodes.ParallelNode):
"""
Implements cython.parallel.threadid()
"""
type = PyrexTypes.c_int_type
def analyse_types(self, env):
self.is_temp = True
# env.add_include_file("omp.h")
return self
def generate_result_code(self, code):
code.putln("#ifdef _OPENMP")
code.putln("%s = omp_get_thread_num();" % self.temp_code)
code.putln("#else")
code.putln("%s = 0;" % self.temp_code)
code.putln("#endif")
def result(self):
return self.temp_code
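    # Typical use from Cython code (hedged sketch):
    #
    #   from cython.parallel import prange, threadid
    #   cdef int tid
    #   for i in prange(n, nogil=True):
    #       tid = threadid()    # expands to omp_get_thread_num() under OpenMP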
#-------------------------------------------------------------------
#
# Trailer nodes
#
#-------------------------------------------------------------------
class IndexNode(ExprNode):
# Sequence indexing.
#
# base ExprNode
# index ExprNode
# indices [ExprNode]
# type_indices [PyrexType]
# is_buffer_access boolean Whether this is a buffer access.
#
# indices is used on buffer access, index on non-buffer access.
# The former contains a clean list of index parameters, the
# latter whatever Python object is needed for index access.
#
# is_fused_index boolean Whether the index is used to specialize a
# c(p)def function
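    #
    # Hedged examples of the two index shapes:
    #   d[key]        ->  index is the key expression, indices stays None
    #   buf[i, j, k]  ->  indices == [i, j, k] once buffer access is analysed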
subexprs = ['base', 'index', 'indices']
indices = None
type_indices = None
is_subscript = True
is_fused_index = False
# Whether we're assigning to a buffer (in that case it needs to be
# writable)
writable_needed = False
# Whether we are indexing or slicing a memoryviewslice
memslice_index = False
memslice_slice = False
is_memslice_copy = False
memslice_ellipsis_noop = False
warned_untyped_idx = False
# set by SingleAssignmentNode after analyse_types()
is_memslice_scalar_assignment = False
def __init__(self, pos, index, **kw):
ExprNode.__init__(self, pos, index=index, **kw)
self._index = index
def calculate_constant_result(self):
self.constant_result = \
self.base.constant_result[self.index.constant_result]
def compile_time_value(self, denv):
base = self.base.compile_time_value(denv)
index = self.index.compile_time_value(denv)
try:
return base[index]
except Exception, e:
self.compile_time_value_error(e)
def is_ephemeral(self):
return self.base.is_ephemeral()
def is_simple(self):
if self.is_buffer_access or self.memslice_index:
return False
elif self.memslice_slice:
return True
base = self.base
return (base.is_simple() and self.index.is_simple()
and base.type and (base.type.is_ptr or base.type.is_array))
def may_be_none(self):
base_type = self.base.type
if base_type:
if base_type.is_string:
return False
if isinstance(self.index, SliceNode):
# slicing!
if base_type in (bytes_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return False
return ExprNode.may_be_none(self)
def analyse_target_declaration(self, env):
pass
def analyse_as_type(self, env):
base_type = self.base.analyse_as_type(env)
if base_type and not base_type.is_pyobject:
if base_type.is_cpp_class:
if isinstance(self.index, TupleNode):
template_values = self.index.args
else:
template_values = [self.index]
import Nodes
type_node = Nodes.TemplatedTypeNode(
pos = self.pos,
positional_args = template_values,
keyword_args = None)
return type_node.analyse(env, base_type = base_type)
else:
return PyrexTypes.CArrayType(base_type, int(self.index.compile_time_value(env)))
return None
def type_dependencies(self, env):
return self.base.type_dependencies(env) + self.index.type_dependencies(env)
def infer_type(self, env):
base_type = self.base.infer_type(env)
if isinstance(self.index, SliceNode):
# slicing!
if base_type.is_string:
# sliced C strings must coerce to Python
return bytes_type
elif base_type.is_pyunicode_ptr:
# sliced Py_UNICODE* strings must coerce to Python
return unicode_type
elif base_type in (unicode_type, bytes_type, str_type,
bytearray_type, list_type, tuple_type):
# slicing these returns the same type
return base_type
else:
# TODO: Handle buffers (hopefully without too much redundancy).
return py_object_type
index_type = self.index.infer_type(env)
if index_type and index_type.is_int or isinstance(self.index, IntNode):
# indexing!
if base_type is unicode_type:
# Py_UCS4 will automatically coerce to a unicode string
# if required, so this is safe. We only infer Py_UCS4
# when the index is a C integer type. Otherwise, we may
# need to use normal Python item access, in which case
# it's faster to return the one-char unicode string than
# to receive it, throw it away, and potentially rebuild it
# on a subsequent PyObject coercion.
return PyrexTypes.c_py_ucs4_type
elif base_type is str_type:
# always returns str - Py2: bytes, Py3: unicode
return base_type
elif base_type is bytearray_type:
return PyrexTypes.c_uchar_type
elif isinstance(self.base, BytesNode):
#if env.global_scope().context.language_level >= 3:
# # inferring 'char' can be made to work in Python 3 mode
# return PyrexTypes.c_char_type
# Py2/3 return different types on indexing bytes objects
return py_object_type
elif base_type in (tuple_type, list_type):
# if base is a literal, take a look at its values
item_type = infer_sequence_item_type(
env, self.base, self.index, seq_type=base_type)
if item_type is not None:
return item_type
elif base_type.is_ptr or base_type.is_array:
return base_type.base_type
if base_type.is_cpp_class:
class FakeOperand:
def __init__(self, **kwds):
self.__dict__.update(kwds)
operands = [
FakeOperand(pos=self.pos, type=base_type),
FakeOperand(pos=self.pos, type=index_type),
]
index_func = env.lookup_operator('[]', operands)
if index_func is not None:
return index_func.type.return_type
# may be slicing or indexing, we don't know
if base_type in (unicode_type, str_type):
            # these types always return their own type on Python indexing/slicing
return base_type
else:
# TODO: Handle buffers (hopefully without too much redundancy).
return py_object_type
def analyse_types(self, env):
return self.analyse_base_and_index_types(env, getting=True)
def analyse_target_types(self, env):
node = self.analyse_base_and_index_types(env, setting=True)
if node.type.is_const:
error(self.pos, "Assignment to const dereference")
if not node.is_lvalue():
error(self.pos, "Assignment to non-lvalue of type '%s'" % node.type)
return node
def analyse_base_and_index_types(self, env, getting=False, setting=False,
analyse_base=True):
        # Note: This might be cleaned up by having IndexNode
        # parsed in a saner way and only constructing the tuple
        # when needed.
# Note that this function must leave IndexNode in a cloneable state.
# For buffers, self.index is packed out on the initial analysis, and
# when cloning self.indices is copied.
self.is_buffer_access = False
# a[...] = b
self.is_memslice_copy = False
# incomplete indexing, Ellipsis indexing or slicing
self.memslice_slice = False
# integer indexing
self.memslice_index = False
if analyse_base:
self.base = self.base.analyse_types(env)
if self.base.type.is_error:
# Do not visit child tree if base is undeclared to avoid confusing
# error messages
self.type = PyrexTypes.error_type
return self
is_slice = isinstance(self.index, SliceNode)
if not env.directives['wraparound']:
if is_slice:
check_negative_indices(self.index.start, self.index.stop)
else:
check_negative_indices(self.index)
# Potentially overflowing index value.
if not is_slice and isinstance(self.index, IntNode) and Utils.long_literal(self.index.value):
self.index = self.index.coerce_to_pyobject(env)
is_memslice = self.base.type.is_memoryviewslice
# Handle the case where base is a literal char* (and we expect a string, not an int)
if not is_memslice and (isinstance(self.base, BytesNode) or is_slice):
if self.base.type.is_string or not (self.base.type.is_ptr or self.base.type.is_array):
self.base = self.base.coerce_to_pyobject(env)
skip_child_analysis = False
buffer_access = False
if self.indices:
indices = self.indices
elif isinstance(self.index, TupleNode):
indices = self.index.args
else:
indices = [self.index]
if (is_memslice and not self.indices and
isinstance(self.index, EllipsisNode)):
# Memoryviewslice copying
self.is_memslice_copy = True
elif is_memslice:
# memoryviewslice indexing or slicing
import MemoryView
skip_child_analysis = True
newaxes = [newaxis for newaxis in indices if newaxis.is_none]
have_slices, indices = MemoryView.unellipsify(indices,
newaxes,
self.base.type.ndim)
self.memslice_index = (not newaxes and
len(indices) == self.base.type.ndim)
axes = []
index_type = PyrexTypes.c_py_ssize_t_type
new_indices = []
if len(indices) - len(newaxes) > self.base.type.ndim:
self.type = error_type
error(indices[self.base.type.ndim].pos,
"Too many indices specified for type %s" %
self.base.type)
return self
axis_idx = 0
for i, index in enumerate(indices[:]):
index = index.analyse_types(env)
if not index.is_none:
access, packing = self.base.type.axes[axis_idx]
axis_idx += 1
if isinstance(index, SliceNode):
self.memslice_slice = True
if index.step.is_none:
axes.append((access, packing))
else:
axes.append((access, 'strided'))
# Coerce start, stop and step to temps of the right type
for attr in ('start', 'stop', 'step'):
value = getattr(index, attr)
if not value.is_none:
value = value.coerce_to(index_type, env)
#value = value.coerce_to_temp(env)
setattr(index, attr, value)
new_indices.append(value)
elif index.is_none:
self.memslice_slice = True
new_indices.append(index)
axes.append(('direct', 'strided'))
elif index.type.is_int or index.type.is_pyobject:
if index.type.is_pyobject and not self.warned_untyped_idx:
warning(index.pos, "Index should be typed for more "
"efficient access", level=2)
IndexNode.warned_untyped_idx = True
self.memslice_index = True
index = index.coerce_to(index_type, env)
indices[i] = index
new_indices.append(index)
else:
self.type = error_type
error(index.pos, "Invalid index for memoryview specified")
return self
self.memslice_index = self.memslice_index and not self.memslice_slice
self.original_indices = indices
            # All indices, with all start/stop/step values for slices.
            # We need to keep this around.
self.indices = new_indices
self.env = env
elif self.base.type.is_buffer:
# Buffer indexing
if len(indices) == self.base.type.ndim:
buffer_access = True
skip_child_analysis = True
for x in indices:
x = x.analyse_types(env)
if not x.type.is_int:
buffer_access = False
if buffer_access and not self.base.type.is_memoryviewslice:
assert hasattr(self.base, "entry") # Must be a NameNode-like node
# On cloning, indices is cloned. Otherwise, unpack index into indices
assert not (buffer_access and isinstance(self.index, CloneNode))
self.nogil = env.nogil
if buffer_access or self.memslice_index:
#if self.base.type.is_memoryviewslice and not self.base.is_name:
# self.base = self.base.coerce_to_temp(env)
self.base = self.base.coerce_to_simple(env)
self.indices = indices
self.index = None
self.type = self.base.type.dtype
self.is_buffer_access = True
self.buffer_type = self.base.type #self.base.entry.type
if getting and self.type.is_pyobject:
self.is_temp = True
if setting and self.base.type.is_memoryviewslice:
self.base.type.writable_needed = True
elif setting:
if not self.base.entry.type.writable:
error(self.pos, "Writing to readonly buffer")
else:
self.writable_needed = True
if self.base.type.is_buffer:
self.base.entry.buffer_aux.writable_needed = True
elif self.is_memslice_copy:
self.type = self.base.type
if getting:
self.memslice_ellipsis_noop = True
else:
self.memslice_broadcast = True
elif self.memslice_slice:
self.index = None
self.is_temp = True
self.use_managed_ref = True
if not MemoryView.validate_axes(self.pos, axes):
self.type = error_type
return self
self.type = PyrexTypes.MemoryViewSliceType(
self.base.type.dtype, axes)
if (self.base.type.is_memoryviewslice and not
self.base.is_name and not
self.base.result_in_temp()):
self.base = self.base.coerce_to_temp(env)
if setting:
self.memslice_broadcast = True
else:
base_type = self.base.type
if not base_type.is_cfunction:
if isinstance(self.index, TupleNode):
self.index = self.index.analyse_types(
env, skip_children=skip_child_analysis)
elif not skip_child_analysis:
self.index = self.index.analyse_types(env)
self.original_index_type = self.index.type
if base_type.is_unicode_char:
# we infer Py_UNICODE/Py_UCS4 for unicode strings in some
# cases, but indexing must still work for them
if setting:
warning(self.pos, "cannot assign to Unicode string index", level=1)
elif self.index.constant_result in (0, -1):
# uchar[0] => uchar
return self.base
self.base = self.base.coerce_to_pyobject(env)
base_type = self.base.type
if base_type.is_pyobject:
if self.index.type.is_int and base_type is not dict_type:
if (getting
and (base_type in (list_type, tuple_type, bytearray_type))
and (not self.index.type.signed
or not env.directives['wraparound']
or (isinstance(self.index, IntNode) and
self.index.has_constant_result() and self.index.constant_result >= 0))
and not env.directives['boundscheck']):
self.is_temp = 0
else:
self.is_temp = 1
self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env).coerce_to_simple(env)
self.original_index_type.create_to_py_utility_code(env)
else:
self.index = self.index.coerce_to_pyobject(env)
self.is_temp = 1
if self.index.type.is_int and base_type is unicode_type:
# Py_UNICODE/Py_UCS4 will automatically coerce to a unicode string
# if required, so this is fast and safe
self.type = PyrexTypes.c_py_ucs4_type
elif self.index.type.is_int and base_type is bytearray_type:
if setting:
self.type = PyrexTypes.c_uchar_type
else:
# not using 'uchar' to enable fast and safe error reporting as '-1'
self.type = PyrexTypes.c_int_type
elif is_slice and base_type in (bytes_type, str_type, unicode_type, list_type, tuple_type):
self.type = base_type
else:
item_type = None
if base_type in (list_type, tuple_type) and self.index.type.is_int:
item_type = infer_sequence_item_type(
env, self.base, self.index, seq_type=base_type)
if item_type is None:
item_type = py_object_type
self.type = item_type
if base_type in (list_type, tuple_type, dict_type):
# do the None check explicitly (not in a helper) to allow optimising it away
self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
else:
if base_type.is_ptr or base_type.is_array:
self.type = base_type.base_type
if is_slice:
self.type = base_type
elif self.index.type.is_pyobject:
self.index = self.index.coerce_to(
PyrexTypes.c_py_ssize_t_type, env)
elif not self.index.type.is_int:
error(self.pos,
"Invalid index type '%s'" %
self.index.type)
elif base_type.is_cpp_class:
function = env.lookup_operator("[]", [self.base, self.index])
if function is None:
error(self.pos, "Indexing '%s' not supported for index type '%s'" % (base_type, self.index.type))
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return self
func_type = function.type
if func_type.is_ptr:
func_type = func_type.base_type
self.index = self.index.coerce_to(func_type.args[0].type, env)
self.type = func_type.return_type
if setting and not func_type.return_type.is_reference:
error(self.pos, "Can't set non-reference result '%s'" % self.type)
elif base_type.is_cfunction:
if base_type.is_fused:
self.parse_indexed_fused_cdef(env)
else:
self.type_indices = self.parse_index_as_types(env)
if base_type.templates is None:
error(self.pos, "Can only parameterize template functions.")
elif len(base_type.templates) != len(self.type_indices):
error(self.pos, "Wrong number of template arguments: expected %s, got %s" % (
(len(base_type.templates), len(self.type_indices))))
self.type = base_type.specialize(dict(zip(base_type.templates, self.type_indices)))
else:
error(self.pos,
"Attempting to index non-array type '%s'" %
base_type)
self.type = PyrexTypes.error_type
self.wrap_in_nonecheck_node(env, getting)
return self
def wrap_in_nonecheck_node(self, env, getting):
if not env.directives['nonecheck'] or not self.base.may_be_none():
return
if self.base.type.is_memoryviewslice:
if self.is_memslice_copy and not getting:
msg = "Cannot assign to None memoryview slice"
elif self.memslice_slice:
msg = "Cannot slice None memoryview slice"
else:
msg = "Cannot index None memoryview slice"
else:
msg = "'NoneType' object is not subscriptable"
self.base = self.base.as_none_safe_node(msg)
def parse_index_as_types(self, env, required=True):
if isinstance(self.index, TupleNode):
indices = self.index.args
else:
indices = [self.index]
type_indices = []
for index in indices:
type_indices.append(index.analyse_as_type(env))
if type_indices[-1] is None:
if required:
error(index.pos, "not parsable as a type")
return None
return type_indices
def parse_indexed_fused_cdef(self, env):
"""
Interpret fused_cdef_func[specific_type1, ...]
        Note that if this method is called, we are an indexed cdef function
        with fused argument types, and this IndexNode will be replaced by a
        NameNode with the specific entry just after the analysis of expressions
        by AnalyseExpressionsTransform.
"""
self.type = PyrexTypes.error_type
self.is_fused_index = True
base_type = self.base.type
specific_types = []
positions = []
if self.index.is_name or self.index.is_attribute:
positions.append(self.index.pos)
elif isinstance(self.index, TupleNode):
for arg in self.index.args:
positions.append(arg.pos)
specific_types = self.parse_index_as_types(env, required=False)
if specific_types is None:
self.index = self.index.analyse_types(env)
if not self.base.entry.as_variable:
error(self.pos, "Can only index fused functions with types")
else:
# A cpdef function indexed with Python objects
self.base.entry = self.entry = self.base.entry.as_variable
self.base.type = self.type = self.entry.type
self.base.is_temp = True
self.is_temp = True
self.entry.used = True
self.is_fused_index = False
return
for i, type in enumerate(specific_types):
specific_types[i] = type.specialize_fused(env)
fused_types = base_type.get_fused_types()
if len(specific_types) > len(fused_types):
return error(self.pos, "Too many types specified")
elif len(specific_types) < len(fused_types):
t = fused_types[len(specific_types)]
return error(self.pos, "Not enough types specified to specialize "
"the function, %s is still fused" % t)
# See if our index types form valid specializations
for pos, specific_type, fused_type in zip(positions,
specific_types,
fused_types):
if not Utils.any([specific_type.same_as(t)
for t in fused_type.types]):
return error(pos, "Type not in fused type")
if specific_type is None or specific_type.is_error:
return
fused_to_specific = dict(zip(fused_types, specific_types))
type = base_type.specialize(fused_to_specific)
if type.is_fused:
# Only partially specific, this is invalid
error(self.pos,
"Index operation makes function only partially specific")
else:
# Fully specific, find the signature with the specialized entry
for signature in self.base.type.get_all_specialized_function_types():
if type.same_as(signature):
self.type = signature
if self.base.is_attribute:
# Pretend to be a normal attribute, for cdef extension
# methods
self.entry = signature.entry
self.is_attribute = True
self.obj = self.base.obj
self.type.entry.used = True
self.base.type = signature
self.base.entry = signature.entry
break
else:
# This is a bug
raise InternalError("Couldn't find the right signature")
gil_message = "Indexing Python object"
def nogil_check(self, env):
if self.is_buffer_access or self.memslice_index or self.memslice_slice:
if not self.memslice_slice and env.directives['boundscheck']:
# error(self.pos, "Cannot check buffer index bounds without gil; "
# "use boundscheck(False) directive")
warning(self.pos, "Use boundscheck(False) for faster access",
level=1)
if self.type.is_pyobject:
error(self.pos, "Cannot access buffer with object dtype without gil")
return
super(IndexNode, self).nogil_check(env)
def check_const_addr(self):
return self.base.check_const_addr() and self.index.check_const()
def is_lvalue(self):
# NOTE: references currently have both is_reference and is_ptr
# set. Since pointers and references have different lvalue
# rules, we must be careful to separate the two.
if self.type.is_reference:
if self.type.ref_base_type.is_array:
# fixed-sized arrays aren't l-values
return False
elif self.type.is_ptr:
# non-const pointers can always be reassigned
return True
elif self.type.is_array:
# fixed-sized arrays aren't l-values
return False
# Just about everything else returned by the index operator
# can be an lvalue.
return True
def calculate_result_code(self):
if self.is_buffer_access:
return "(*%s)" % self.buffer_ptr_code
elif self.is_memslice_copy:
return self.base.result()
elif self.base.type in (list_type, tuple_type, bytearray_type):
if self.base.type is list_type:
index_code = "PyList_GET_ITEM(%s, %s)"
elif self.base.type is tuple_type:
index_code = "PyTuple_GET_ITEM(%s, %s)"
elif self.base.type is bytearray_type:
index_code = "((unsigned char)(PyByteArray_AS_STRING(%s)[%s]))"
else:
assert False, "unexpected base type in indexing: %s" % self.base.type
elif self.base.type.is_cfunction:
return "%s<%s>" % (
self.base.result(),
",".join([param.declaration_code("") for param in self.type_indices]))
else:
if (self.type.is_ptr or self.type.is_array) and self.type == self.base.type:
error(self.pos, "Invalid use of pointer slice")
return
index_code = "(%s[%s])"
return index_code % (self.base.result(), self.index.result())
def extra_index_params(self, code):
if self.index.type.is_int:
is_list = self.base.type is list_type
wraparound = (
bool(code.globalstate.directives['wraparound']) and
self.original_index_type.signed and
not (isinstance(self.index.constant_result, (int, long))
and self.index.constant_result >= 0))
boundscheck = bool(code.globalstate.directives['boundscheck'])
return ", %s, %d, %s, %d, %d, %d" % (
self.original_index_type.declaration_code(""),
self.original_index_type.signed and 1 or 0,
self.original_index_type.to_py_function,
is_list, wraparound, boundscheck)
else:
return ""
def generate_subexpr_evaluation_code(self, code):
self.base.generate_evaluation_code(code)
if self.type_indices is not None:
pass
elif self.indices is None:
self.index.generate_evaluation_code(code)
else:
for i in self.indices:
i.generate_evaluation_code(code)
def generate_subexpr_disposal_code(self, code):
self.base.generate_disposal_code(code)
if self.type_indices is not None:
pass
elif self.indices is None:
self.index.generate_disposal_code(code)
else:
for i in self.indices:
i.generate_disposal_code(code)
def free_subexpr_temps(self, code):
self.base.free_temps(code)
if self.indices is None:
self.index.free_temps(code)
else:
for i in self.indices:
i.free_temps(code)
def generate_result_code(self, code):
if self.is_buffer_access or self.memslice_index:
buffer_entry, self.buffer_ptr_code = self.buffer_lookup_code(code)
if self.type.is_pyobject:
# is_temp is True, so must pull out value and incref it.
# NOTE: object temporary results for nodes are declared
# as PyObject *, so we need a cast
code.putln("%s = (PyObject *) *%s;" % (self.temp_code,
self.buffer_ptr_code))
code.putln("__Pyx_INCREF((PyObject*)%s);" % self.temp_code)
elif self.memslice_slice:
self.put_memoryviewslice_slice_code(code)
elif self.is_temp:
if self.type.is_pyobject:
error_value = 'NULL'
if self.index.type.is_int:
if self.base.type is list_type:
function = "__Pyx_GetItemInt_List"
elif self.base.type is tuple_type:
function = "__Pyx_GetItemInt_Tuple"
else:
function = "__Pyx_GetItemInt"
code.globalstate.use_utility_code(
TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c"))
else:
if self.base.type is dict_type:
function = "__Pyx_PyDict_GetItem"
code.globalstate.use_utility_code(
UtilityCode.load_cached("DictGetItem", "ObjectHandling.c"))
else:
function = "PyObject_GetItem"
elif self.type.is_unicode_char and self.base.type is unicode_type:
assert self.index.type.is_int
function = "__Pyx_GetItemInt_Unicode"
error_value = '(Py_UCS4)-1'
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetItemIntUnicode", "StringTools.c"))
elif self.base.type is bytearray_type:
assert self.index.type.is_int
assert self.type.is_int
function = "__Pyx_GetItemInt_ByteArray"
error_value = '-1'
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetItemIntByteArray", "StringTools.c"))
else:
assert False, "unexpected type %s and base type %s for indexing" % (
self.type, self.base.type)
if self.index.type.is_int:
index_code = self.index.result()
else:
index_code = self.index.py_result()
code.putln(
"%s = %s(%s, %s%s); if (unlikely(%s == %s)) %s;" % (
self.result(),
function,
self.base.py_result(),
index_code,
self.extra_index_params(code),
self.result(),
error_value,
code.error_goto(self.pos)))
if self.type.is_pyobject:
code.put_gotref(self.py_result())
def generate_setitem_code(self, value_code, code):
if self.index.type.is_int:
if self.base.type is bytearray_type:
code.globalstate.use_utility_code(
UtilityCode.load_cached("SetItemIntByteArray", "StringTools.c"))
function = "__Pyx_SetItemInt_ByteArray"
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("SetItemInt", "ObjectHandling.c"))
function = "__Pyx_SetItemInt"
index_code = self.index.result()
else:
index_code = self.index.py_result()
if self.base.type is dict_type:
function = "PyDict_SetItem"
                # It would seem that we could specialize lists/tuples, but that
# shouldn't happen here.
# Both PyList_SetItem() and PyTuple_SetItem() take a Py_ssize_t as
# index instead of an object, and bad conversion here would give
# the wrong exception. Also, tuples are supposed to be immutable,
# and raise a TypeError when trying to set their entries
# (PyTuple_SetItem() is for creating new tuples from scratch).
else:
function = "PyObject_SetItem"
code.putln(
"if (unlikely(%s(%s, %s, %s%s) < 0)) %s" % (
function,
self.base.py_result(),
index_code,
value_code,
self.extra_index_params(code),
code.error_goto(self.pos)))
def generate_buffer_setitem_code(self, rhs, code, op=""):
# Used from generate_assignment_code and InPlaceAssignmentNode
buffer_entry, ptrexpr = self.buffer_lookup_code(code)
if self.buffer_type.dtype.is_pyobject:
# Must manage refcounts. Decref what is already there
# and incref what we put in.
ptr = code.funcstate.allocate_temp(buffer_entry.buf_ptr_type,
manage_ref=False)
rhs_code = rhs.result()
code.putln("%s = %s;" % (ptr, ptrexpr))
code.put_gotref("*%s" % ptr)
code.putln("__Pyx_INCREF(%s); __Pyx_DECREF(*%s);" % (
rhs_code, ptr))
code.putln("*%s %s= %s;" % (ptr, op, rhs_code))
code.put_giveref("*%s" % ptr)
code.funcstate.release_temp(ptr)
else:
# Simple case
code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result()))
def generate_assignment_code(self, rhs, code):
generate_evaluation_code = (self.is_memslice_scalar_assignment or
self.memslice_slice)
if generate_evaluation_code:
self.generate_evaluation_code(code)
else:
self.generate_subexpr_evaluation_code(code)
if self.is_buffer_access or self.memslice_index:
self.generate_buffer_setitem_code(rhs, code)
elif self.is_memslice_scalar_assignment:
self.generate_memoryviewslice_assign_scalar_code(rhs, code)
elif self.memslice_slice or self.is_memslice_copy:
self.generate_memoryviewslice_setslice_code(rhs, code)
elif self.type.is_pyobject:
self.generate_setitem_code(rhs.py_result(), code)
elif self.base.type is bytearray_type:
value_code = self._check_byte_value(code, rhs)
self.generate_setitem_code(value_code, code)
else:
code.putln(
"%s = %s;" % (
self.result(), rhs.result()))
if generate_evaluation_code:
self.generate_disposal_code(code)
else:
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
def _check_byte_value(self, code, rhs):
# TODO: should we do this generally on downcasts, or just here?
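        # Illustrative semantics (hedged): for a bytearray b,
        #   b[0] = 255 is accepted, while
        #   b[0] = 256 must raise ValueError("byte must be in range(0, 256)"),
        # matching CPython's own bytearray item assignment.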
assert rhs.type.is_int, repr(rhs.type)
value_code = rhs.result()
if rhs.has_constant_result():
if 0 <= rhs.constant_result < 256:
return value_code
needs_cast = True # make at least the C compiler happy
warning(rhs.pos,
"value outside of range(0, 256)"
" when assigning to byte: %s" % rhs.constant_result,
level=1)
else:
needs_cast = rhs.type != PyrexTypes.c_uchar_type
if not self.nogil:
conditions = []
if rhs.is_literal or rhs.type.signed:
conditions.append('%s < 0' % value_code)
if (rhs.is_literal or not
(rhs.is_temp and rhs.type in (
PyrexTypes.c_uchar_type, PyrexTypes.c_char_type,
PyrexTypes.c_schar_type))):
conditions.append('%s > 255' % value_code)
if conditions:
code.putln("if (unlikely(%s)) {" % ' || '.join(conditions))
code.putln(
'PyErr_SetString(PyExc_ValueError,'
' "byte must be in range(0, 256)"); %s' %
code.error_goto(self.pos))
code.putln("}")
if needs_cast:
value_code = '((unsigned char)%s)' % value_code
return value_code
def generate_deletion_code(self, code, ignore_nonexisting=False):
self.generate_subexpr_evaluation_code(code)
#if self.type.is_pyobject:
if self.index.type.is_int:
function = "__Pyx_DelItemInt"
index_code = self.index.result()
code.globalstate.use_utility_code(
UtilityCode.load_cached("DelItemInt", "ObjectHandling.c"))
else:
index_code = self.index.py_result()
if self.base.type is dict_type:
function = "PyDict_DelItem"
else:
function = "PyObject_DelItem"
code.putln(
"if (%s(%s, %s%s) < 0) %s" % (
function,
self.base.py_result(),
index_code,
self.extra_index_params(code),
code.error_goto(self.pos)))
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
def buffer_entry(self):
import Buffer, MemoryView
base = self.base
if self.base.is_nonecheck:
base = base.arg
if base.is_name:
entry = base.entry
else:
# SimpleCallNode is_simple is not consistent with coerce_to_simple
assert base.is_simple() or base.is_temp
cname = base.result()
entry = Symtab.Entry(cname, cname, self.base.type, self.base.pos)
if entry.type.is_buffer:
buffer_entry = Buffer.BufferEntry(entry)
else:
buffer_entry = MemoryView.MemoryViewSliceBufferEntry(entry)
return buffer_entry
def buffer_lookup_code(self, code):
"ndarray[1, 2, 3] and memslice[1, 2, 3]"
# Assign indices to temps
index_temps = [code.funcstate.allocate_temp(i.type, manage_ref=False)
for i in self.indices]
for temp, index in zip(index_temps, self.indices):
code.putln("%s = %s;" % (temp, index.result()))
# Generate buffer access code using these temps
import Buffer
buffer_entry = self.buffer_entry()
if buffer_entry.type.is_buffer:
negative_indices = buffer_entry.type.negative_indices
else:
negative_indices = Buffer.buffer_defaults['negative_indices']
return buffer_entry, Buffer.put_buffer_lookup_code(
entry=buffer_entry,
index_signeds=[i.type.signed for i in self.indices],
index_cnames=index_temps,
directives=code.globalstate.directives,
pos=self.pos, code=code,
negative_indices=negative_indices,
in_nogil_context=self.in_nogil_context)
def put_memoryviewslice_slice_code(self, code):
"memslice[:]"
buffer_entry = self.buffer_entry()
have_gil = not self.in_nogil_context
if sys.version_info < (3,):
def next_(it):
return it.next()
else:
next_ = next
have_slices = False
it = iter(self.indices)
for index in self.original_indices:
is_slice = isinstance(index, SliceNode)
have_slices = have_slices or is_slice
if is_slice:
if not index.start.is_none:
index.start = next_(it)
if not index.stop.is_none:
index.stop = next_(it)
if not index.step.is_none:
index.step = next_(it)
else:
next_(it)
assert not list(it)
buffer_entry.generate_buffer_slice_code(code, self.original_indices,
self.result(),
have_gil=have_gil,
have_slices=have_slices,
directives=code.globalstate.directives)
def generate_memoryviewslice_setslice_code(self, rhs, code):
"memslice1[...] = memslice2 or memslice1[:] = memslice2"
import MemoryView
MemoryView.copy_broadcast_memview_src_to_dst(rhs, self, code)
def generate_memoryviewslice_assign_scalar_code(self, rhs, code):
"memslice1[...] = 0.0 or memslice1[:] = 0.0"
import MemoryView
MemoryView.assign_scalar(self, rhs, code)
class SliceIndexNode(ExprNode):
# 2-element slice indexing
#
# base ExprNode
# start ExprNode or None
# stop ExprNode or None
# slice ExprNode or None constant slice object
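    #
    # Hedged examples:
    #   s[1:3]  ->  handled directly by this node (start/stop only)
    #   s[::2]  ->  has a step, so it is parsed as an IndexNode with a
    #               SliceNode index and never reaches SliceIndexNode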
subexprs = ['base', 'start', 'stop', 'slice']
slice = None
def infer_type(self, env):
base_type = self.base.infer_type(env)
if base_type.is_string or base_type.is_cpp_class:
return bytes_type
elif base_type.is_pyunicode_ptr:
return unicode_type
elif base_type in (bytes_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return base_type
elif base_type.is_ptr or base_type.is_array:
return PyrexTypes.c_array_type(base_type.base_type, None)
return py_object_type
def may_be_none(self):
base_type = self.base.type
if base_type:
if base_type.is_string:
return False
if base_type in (bytes_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return False
return ExprNode.may_be_none(self)
def calculate_constant_result(self):
if self.start is None:
start = None
else:
start = self.start.constant_result
if self.stop is None:
stop = None
else:
stop = self.stop.constant_result
self.constant_result = self.base.constant_result[start:stop]
def compile_time_value(self, denv):
base = self.base.compile_time_value(denv)
if self.start is None:
start = 0
else:
start = self.start.compile_time_value(denv)
if self.stop is None:
stop = None
else:
stop = self.stop.compile_time_value(denv)
try:
return base[start:stop]
except Exception, e:
self.compile_time_value_error(e)
def analyse_target_declaration(self, env):
pass
def analyse_target_types(self, env):
node = self.analyse_types(env, getting=False)
# when assigning, we must accept any Python type
if node.type.is_pyobject:
node.type = py_object_type
return node
def analyse_types(self, env, getting=True):
self.base = self.base.analyse_types(env)
if self.base.type.is_memoryviewslice:
none_node = NoneNode(self.pos)
index = SliceNode(self.pos,
start=self.start or none_node,
stop=self.stop or none_node,
step=none_node)
index_node = IndexNode(self.pos, index, base=self.base)
return index_node.analyse_base_and_index_types(
env, getting=getting, setting=not getting,
analyse_base=False)
if self.start:
self.start = self.start.analyse_types(env)
if self.stop:
self.stop = self.stop.analyse_types(env)
if not env.directives['wraparound']:
check_negative_indices(self.start, self.stop)
base_type = self.base.type
if base_type.is_string or base_type.is_cpp_string:
self.type = default_str_type(env)
elif base_type.is_pyunicode_ptr:
self.type = unicode_type
elif base_type.is_ptr:
self.type = base_type
elif base_type.is_array:
# we need a ptr type here instead of an array type, as
# array types can result in invalid type casts in the C
# code
self.type = PyrexTypes.CPtrType(base_type.base_type)
else:
self.base = self.base.coerce_to_pyobject(env)
self.type = py_object_type
if base_type.is_builtin_type:
# slicing builtin types returns something of the same type
self.type = base_type
self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
if self.type is py_object_type:
if (not self.start or self.start.is_literal) and \
(not self.stop or self.stop.is_literal):
# cache the constant slice object, in case we need it
none_node = NoneNode(self.pos)
self.slice = SliceNode(
self.pos,
start=copy.deepcopy(self.start or none_node),
stop=copy.deepcopy(self.stop or none_node),
step=none_node
).analyse_types(env)
else:
c_int = PyrexTypes.c_py_ssize_t_type
if self.start:
self.start = self.start.coerce_to(c_int, env)
if self.stop:
self.stop = self.stop.coerce_to(c_int, env)
self.is_temp = 1
return self
nogil_check = Node.gil_error
gil_message = "Slicing Python object"
get_slice_utility_code = TempitaUtilityCode.load(
"SliceObject", "ObjectHandling.c", context={'access': 'Get'})
set_slice_utility_code = TempitaUtilityCode.load(
"SliceObject", "ObjectHandling.c", context={'access': 'Set'})
def coerce_to(self, dst_type, env):
if ((self.base.type.is_string or self.base.type.is_cpp_string)
and dst_type in (bytes_type, bytearray_type, str_type, unicode_type)):
if (dst_type not in (bytes_type, bytearray_type)
and not env.directives['c_string_encoding']):
error(self.pos,
"default encoding required for conversion from '%s' to '%s'" %
(self.base.type, dst_type))
self.type = dst_type
return super(SliceIndexNode, self).coerce_to(dst_type, env)
def generate_result_code(self, code):
if not self.type.is_pyobject:
error(self.pos,
"Slicing is not currently supported for '%s'." % self.type)
return
base_result = self.base.result()
result = self.result()
start_code = self.start_code()
stop_code = self.stop_code()
if self.base.type.is_string:
base_result = self.base.result()
if self.base.type != PyrexTypes.c_char_ptr_type:
base_result = '((const char*)%s)' % base_result
if self.type is bytearray_type:
type_name = 'ByteArray'
else:
type_name = self.type.name.title()
if self.stop is None:
code.putln(
"%s = __Pyx_Py%s_FromString(%s + %s); %s" % (
result,
type_name,
base_result,
start_code,
code.error_goto_if_null(result, self.pos)))
else:
code.putln(
"%s = __Pyx_Py%s_FromStringAndSize(%s + %s, %s - %s); %s" % (
result,
type_name,
base_result,
start_code,
stop_code,
start_code,
code.error_goto_if_null(result, self.pos)))
elif self.base.type.is_pyunicode_ptr:
base_result = self.base.result()
if self.base.type != PyrexTypes.c_py_unicode_ptr_type:
base_result = '((const Py_UNICODE*)%s)' % base_result
if self.stop is None:
code.putln(
"%s = __Pyx_PyUnicode_FromUnicode(%s + %s); %s" % (
result,
base_result,
start_code,
code.error_goto_if_null(result, self.pos)))
else:
code.putln(
"%s = __Pyx_PyUnicode_FromUnicodeAndLength(%s + %s, %s - %s); %s" % (
result,
base_result,
start_code,
stop_code,
start_code,
code.error_goto_if_null(result, self.pos)))
elif self.base.type is unicode_type:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyUnicode_Substring", "StringTools.c"))
code.putln(
"%s = __Pyx_PyUnicode_Substring(%s, %s, %s); %s" % (
result,
base_result,
start_code,
stop_code,
code.error_goto_if_null(result, self.pos)))
elif self.type is py_object_type:
code.globalstate.use_utility_code(self.get_slice_utility_code)
(has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice) = self.get_slice_config()
code.putln(
"%s = __Pyx_PyObject_GetSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d); %s" % (
result,
self.base.py_result(),
c_start, c_stop,
py_start, py_stop, py_slice,
has_c_start, has_c_stop,
bool(code.globalstate.directives['wraparound']),
code.error_goto_if_null(result, self.pos)))
else:
if self.base.type is list_type:
code.globalstate.use_utility_code(
TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
cfunc = '__Pyx_PyList_GetSlice'
elif self.base.type is tuple_type:
code.globalstate.use_utility_code(
TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
cfunc = '__Pyx_PyTuple_GetSlice'
else:
cfunc = '__Pyx_PySequence_GetSlice'
code.putln(
"%s = %s(%s, %s, %s); %s" % (
result,
cfunc,
self.base.py_result(),
start_code,
stop_code,
code.error_goto_if_null(result, self.pos)))
code.put_gotref(self.py_result())
def generate_assignment_code(self, rhs, code):
self.generate_subexpr_evaluation_code(code)
if self.type.is_pyobject:
code.globalstate.use_utility_code(self.set_slice_utility_code)
(has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice) = self.get_slice_config()
code.put_error_if_neg(self.pos,
"__Pyx_PyObject_SetSlice(%s, %s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
self.base.py_result(),
rhs.py_result(),
c_start, c_stop,
py_start, py_stop, py_slice,
has_c_start, has_c_stop,
bool(code.globalstate.directives['wraparound'])))
else:
start_offset = ''
if self.start:
start_offset = self.start_code()
if start_offset == '0':
start_offset = ''
else:
start_offset += '+'
if rhs.type.is_array:
array_length = rhs.type.size
self.generate_slice_guard_code(code, array_length)
else:
error(self.pos,
"Slice assignments from pointers are not yet supported.")
# FIXME: fix the array size according to start/stop
array_length = self.base.type.size
for i in range(array_length):
code.putln("%s[%s%s] = %s[%d];" % (
self.base.result(), start_offset, i,
rhs.result(), i))
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
def generate_deletion_code(self, code, ignore_nonexisting=False):
if not self.base.type.is_pyobject:
error(self.pos,
"Deleting slices is only supported for Python types, not '%s'." % self.type)
return
self.generate_subexpr_evaluation_code(code)
code.globalstate.use_utility_code(self.set_slice_utility_code)
(has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice) = self.get_slice_config()
code.put_error_if_neg(self.pos,
"__Pyx_PyObject_DelSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
self.base.py_result(),
c_start, c_stop,
py_start, py_stop, py_slice,
has_c_start, has_c_stop,
bool(code.globalstate.directives['wraparound'])))
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
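    # get_slice_config() packages the slice bounds for the
    # __Pyx_PyObject_{Get,Set,Del}Slice() helpers used above: C values
    # are passed directly, Python values by address, and the cached
    # slice object (if any) is passed as a fallback.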
def get_slice_config(self):
has_c_start, c_start, py_start = False, '0', 'NULL'
if self.start:
has_c_start = not self.start.type.is_pyobject
if has_c_start:
c_start = self.start.result()
else:
py_start = '&%s' % self.start.py_result()
has_c_stop, c_stop, py_stop = False, '0', 'NULL'
if self.stop:
has_c_stop = not self.stop.type.is_pyobject
if has_c_stop:
c_stop = self.stop.result()
else:
py_stop = '&%s' % self.stop.py_result()
py_slice = self.slice and '&%s' % self.slice.py_result() or 'NULL'
return (has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice)
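    # Illustrative example (not from the original source): for a C
    # array assignment like
    #     cdef int a[5]
    #     a[1:4] = b    # b a C array of known size
    # generate_slice_guard_code() proves the length match at compile
    # time when both bounds are integer literals, and otherwise emits a
    # runtime "if (unlikely((stop - start) != size))" check that raises
    # a ValueError.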
def generate_slice_guard_code(self, code, target_size):
if not self.base.type.is_array:
return
slice_size = self.base.type.size
start = stop = None
if self.stop:
stop = self.stop.result()
try:
stop = int(stop)
if stop < 0:
slice_size = self.base.type.size + stop
else:
slice_size = stop
stop = None
except ValueError:
pass
if self.start:
start = self.start.result()
try:
start = int(start)
if start < 0:
start = self.base.type.size + start
slice_size -= start
start = None
except ValueError:
pass
check = None
if slice_size < 0:
if target_size > 0:
error(self.pos, "Assignment to empty slice.")
elif start is None and stop is None:
# we know the exact slice length
if target_size != slice_size:
error(self.pos, "Assignment to slice of wrong length, expected %d, got %d" % (
slice_size, target_size))
elif start is not None:
if stop is None:
stop = slice_size
check = "(%s)-(%s)" % (stop, start)
else: # stop is not None:
check = stop
if check:
code.putln("if (unlikely((%s) != %d)) {" % (check, target_size))
code.putln('PyErr_Format(PyExc_ValueError, "Assignment to slice of wrong length, expected %%" CYTHON_FORMAT_SSIZE_T "d, got %%" CYTHON_FORMAT_SSIZE_T "d", (Py_ssize_t)%d, (Py_ssize_t)(%s));' % (
target_size, check))
code.putln(code.error_goto(self.pos))
code.putln("}")
def start_code(self):
if self.start:
return self.start.result()
else:
return "0"
def stop_code(self):
if self.stop:
return self.stop.result()
elif self.base.type.is_array:
return self.base.type.size
else:
return "PY_SSIZE_T_MAX"
def calculate_result_code(self):
# self.result() is not used, but this method must exist
return "<unused>"
class SliceNode(ExprNode):
# start:stop:step in subscript list
#
# start ExprNode
# stop ExprNode
# step ExprNode
subexprs = ['start', 'stop', 'step']
type = slice_type
is_temp = 1
def calculate_constant_result(self):
self.constant_result = slice(
self.start.constant_result,
self.stop.constant_result,
self.step.constant_result)
def compile_time_value(self, denv):
start = self.start.compile_time_value(denv)
stop = self.stop.compile_time_value(denv)
step = self.step.compile_time_value(denv)
try:
return slice(start, stop, step)
except Exception, e:
self.compile_time_value_error(e)
def may_be_none(self):
return False
def analyse_types(self, env):
start = self.start.analyse_types(env)
stop = self.stop.analyse_types(env)
step = self.step.analyse_types(env)
self.start = start.coerce_to_pyobject(env)
self.stop = stop.coerce_to_pyobject(env)
self.step = step.coerce_to_pyobject(env)
if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
self.is_literal = True
self.is_temp = False
return self
gil_message = "Constructing Python slice object"
def calculate_result_code(self):
return self.result_code
def generate_result_code(self, code):
if self.is_literal:
self.result_code = code.get_py_const(py_object_type, 'slice', cleanup_level=2)
code = code.get_cached_constants_writer()
code.mark_pos(self.pos)
code.putln(
"%s = PySlice_New(%s, %s, %s); %s" % (
self.result(),
self.start.py_result(),
self.stop.py_result(),
self.step.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
if self.is_literal:
code.put_giveref(self.py_result())
def __deepcopy__(self, memo):
"""
There is a copy bug in python 2.4 for slice objects.
"""
return SliceNode(
self.pos,
start=copy.deepcopy(self.start, memo),
stop=copy.deepcopy(self.stop, memo),
step=copy.deepcopy(self.step, memo),
is_temp=self.is_temp,
is_literal=self.is_literal,
constant_result=self.constant_result)
class CallNode(ExprNode):
# allow overriding the default 'may_be_none' behaviour
may_return_none = None
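    # (tri-state: None falls back to ExprNode.may_be_none(), while
    # subclasses such as PythonCapiCallNode set True/False to give a
    # definite answer)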
def infer_type(self, env):
function = self.function
func_type = function.infer_type(env)
if isinstance(function, NewExprNode):
# note: needs call to infer_type() above
return PyrexTypes.CPtrType(function.class_type)
if func_type is py_object_type:
# function might have lied for safety => try to find better type
entry = getattr(function, 'entry', None)
if entry is not None:
func_type = entry.type or func_type
if func_type.is_ptr:
func_type = func_type.base_type
if func_type.is_cfunction:
return func_type.return_type
elif func_type is type_type:
if function.is_name and function.entry and function.entry.type:
result_type = function.entry.type
if result_type.is_extension_type:
return result_type
elif result_type.is_builtin_type:
if function.entry.name == 'float':
return PyrexTypes.c_double_type
elif function.entry.name in Builtin.types_that_construct_their_instance:
return result_type
return py_object_type
def type_dependencies(self, env):
        # TODO: Update when Danilo's C++ code is merged in, to handle
        # the case of function overloading.
return self.function.type_dependencies(env)
def is_simple(self):
        # C function calls could be considered simple, but they may
        # have side effects that matter when multiple operations must
        # take effect in a given order, e.g. when constructing the
        # argument sequence for a function call or comparing values.
return False
def may_be_none(self):
if self.may_return_none is not None:
return self.may_return_none
func_type = self.function.type
if func_type is type_type and self.function.is_name:
entry = self.function.entry
if entry.type.is_extension_type:
return False
if (entry.type.is_builtin_type and
entry.name in Builtin.types_that_construct_their_instance):
return False
return ExprNode.may_be_none(self)
def analyse_as_type_constructor(self, env):
type = self.function.analyse_as_type(env)
if type and type.is_struct_or_union:
args, kwds = self.explicit_args_kwds()
items = []
for arg, member in zip(args, type.scope.var_entries):
items.append(DictItemNode(pos=arg.pos, key=StringNode(pos=arg.pos, value=member.name), value=arg))
if kwds:
items += kwds.key_value_pairs
self.key_value_pairs = items
self.__class__ = DictNode
self.analyse_types(env) # FIXME
self.coerce_to(type, env)
return True
elif type and type.is_cpp_class:
self.args = [ arg.analyse_types(env) for arg in self.args ]
constructor = type.scope.lookup("<init>")
self.function = RawCNameExprNode(self.function.pos, constructor.type)
self.function.entry = constructor
self.function.set_cname(type.declaration_code(""))
self.analyse_c_function_call(env)
self.type = type
return True
def is_lvalue(self):
return self.type.is_reference
def nogil_check(self, env):
func_type = self.function_type()
if func_type.is_pyobject:
self.gil_error()
elif not getattr(func_type, 'nogil', False):
self.gil_error()
gil_message = "Calling gil-requiring function"
class SimpleCallNode(CallNode):
# Function call without keyword, * or ** args.
#
# function ExprNode
# args [ExprNode]
# arg_tuple ExprNode or None used internally
# self ExprNode or None used internally
# coerced_self ExprNode or None used internally
# wrapper_call bool used internally
# has_optional_args bool used internally
# nogil bool used internally
subexprs = ['self', 'coerced_self', 'function', 'args', 'arg_tuple']
self = None
coerced_self = None
arg_tuple = None
wrapper_call = False
has_optional_args = False
nogil = False
analysed = False
def compile_time_value(self, denv):
function = self.function.compile_time_value(denv)
args = [arg.compile_time_value(denv) for arg in self.args]
try:
return function(*args)
except Exception, e:
self.compile_time_value_error(e)
def analyse_as_type(self, env):
attr = self.function.as_cython_attribute()
if attr == 'pointer':
if len(self.args) != 1:
                error(self.pos, "only one type allowed.")
else:
type = self.args[0].analyse_as_type(env)
if not type:
error(self.args[0].pos, "Unknown type")
else:
return PyrexTypes.CPtrType(type)
def explicit_args_kwds(self):
return self.args, None
def analyse_types(self, env):
if self.analyse_as_type_constructor(env):
return self
if self.analysed:
return self
self.analysed = True
self.function.is_called = 1
self.function = self.function.analyse_types(env)
function = self.function
if function.is_attribute and function.entry and function.entry.is_cmethod:
# Take ownership of the object from which the attribute
# was obtained, because we need to pass it as 'self'.
self.self = function.obj
function.obj = CloneNode(self.self)
func_type = self.function_type()
if func_type.is_pyobject:
self.arg_tuple = TupleNode(self.pos, args = self.args)
self.arg_tuple = self.arg_tuple.analyse_types(env)
self.args = None
if func_type is Builtin.type_type and function.is_name and \
function.entry and \
function.entry.is_builtin and \
function.entry.name in Builtin.types_that_construct_their_instance:
# calling a builtin type that returns a specific object type
if function.entry.name == 'float':
# the following will come true later on in a transform
self.type = PyrexTypes.c_double_type
self.result_ctype = PyrexTypes.c_double_type
else:
self.type = Builtin.builtin_types[function.entry.name]
self.result_ctype = py_object_type
self.may_return_none = False
elif function.is_name and function.type_entry:
# We are calling an extension type constructor. As
# long as we do not support __new__(), the result type
# is clear
self.type = function.type_entry.type
self.result_ctype = py_object_type
self.may_return_none = False
else:
self.type = py_object_type
self.is_temp = 1
else:
self.args = [ arg.analyse_types(env) for arg in self.args ]
self.analyse_c_function_call(env)
return self
def function_type(self):
# Return the type of the function being called, coercing a function
# pointer to a function if necessary. If the function has fused
# arguments, return the specific type.
func_type = self.function.type
if func_type.is_ptr:
func_type = func_type.base_type
return func_type
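    # analyse_c_function_call() resolves overloaded entries (including
    # C++ operator() and fused-type specialisations) via
    # PyrexTypes.best_match(), then coerces each argument to its formal
    # parameter type and decides which arguments need owned temps.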
def analyse_c_function_call(self, env):
if self.function.type is error_type:
self.type = error_type
return
if self.self:
args = [self.self] + self.args
else:
args = self.args
if self.function.type.is_cpp_class:
overloaded_entry = self.function.type.scope.lookup("operator()")
if overloaded_entry is None:
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return
elif hasattr(self.function, 'entry'):
overloaded_entry = self.function.entry
elif (isinstance(self.function, IndexNode) and
self.function.is_fused_index):
overloaded_entry = self.function.type.entry
else:
overloaded_entry = None
if overloaded_entry:
if self.function.type.is_fused:
functypes = self.function.type.get_all_specialized_function_types()
alternatives = [f.entry for f in functypes]
else:
alternatives = overloaded_entry.all_alternatives()
entry = PyrexTypes.best_match(args, alternatives, self.pos, env)
if not entry:
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return
entry.used = True
self.function.entry = entry
self.function.type = entry.type
func_type = self.function_type()
else:
entry = None
func_type = self.function_type()
if not func_type.is_cfunction:
error(self.pos, "Calling non-function type '%s'" % func_type)
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return
# Check no. of args
max_nargs = len(func_type.args)
expected_nargs = max_nargs - func_type.optional_arg_count
actual_nargs = len(args)
if func_type.optional_arg_count and expected_nargs != actual_nargs:
self.has_optional_args = 1
self.is_temp = 1
# check 'self' argument
if entry and entry.is_cmethod and func_type.args:
formal_arg = func_type.args[0]
arg = args[0]
if formal_arg.not_none:
if self.self:
self.self = self.self.as_none_safe_node(
"'NoneType' object has no attribute '%s'",
error='PyExc_AttributeError',
format_args=[entry.name])
else:
# unbound method
arg = arg.as_none_safe_node(
"descriptor '%s' requires a '%s' object but received a 'NoneType'",
format_args=[entry.name, formal_arg.type.name])
if self.self:
if formal_arg.accept_builtin_subtypes:
arg = CMethodSelfCloneNode(self.self)
else:
arg = CloneNode(self.self)
arg = self.coerced_self = arg.coerce_to(formal_arg.type, env)
elif formal_arg.type.is_builtin_type:
# special case: unbound methods of builtins accept subtypes
arg = arg.coerce_to(formal_arg.type, env)
if arg.type.is_builtin_type and isinstance(arg, PyTypeTestNode):
arg.exact_builtin_type = False
args[0] = arg
# Coerce arguments
some_args_in_temps = False
for i in xrange(min(max_nargs, actual_nargs)):
formal_arg = func_type.args[i]
formal_type = formal_arg.type
arg = args[i].coerce_to(formal_type, env)
if formal_arg.not_none:
# C methods must do the None checks at *call* time
arg = arg.as_none_safe_node(
"cannot pass None into a C function argument that is declared 'not None'")
if arg.is_temp:
if i > 0:
# first argument in temp doesn't impact subsequent arguments
some_args_in_temps = True
elif arg.type.is_pyobject and not env.nogil:
if i == 0 and self.self is not None:
# a method's cloned "self" argument is ok
pass
elif arg.nonlocally_immutable():
# plain local variables are ok
pass
else:
# we do not safely own the argument's reference,
# but we must make sure it cannot be collected
# before we return from the function, so we create
# an owned temp reference to it
if i > 0: # first argument doesn't matter
some_args_in_temps = True
arg = arg.coerce_to_temp(env)
args[i] = arg
# handle additional varargs parameters
for i in xrange(max_nargs, actual_nargs):
arg = args[i]
if arg.type.is_pyobject:
arg_ctype = arg.type.default_coerced_ctype()
if arg_ctype is None:
error(self.args[i].pos,
"Python object cannot be passed as a varargs parameter")
else:
args[i] = arg = arg.coerce_to(arg_ctype, env)
if arg.is_temp and i > 0:
some_args_in_temps = True
if some_args_in_temps:
# if some args are temps and others are not, they may get
# constructed in the wrong order (temps first) => make
# sure they are either all temps or all not temps (except
# for the last argument, which is evaluated last in any
# case)
for i in xrange(actual_nargs-1):
if i == 0 and self.self is not None:
continue # self is ok
arg = args[i]
if arg.nonlocally_immutable():
# locals, C functions, unassignable types are safe.
pass
elif arg.type.is_cpp_class:
# Assignment has side effects, avoid.
pass
elif env.nogil and arg.type.is_pyobject:
# can't copy a Python reference into a temp in nogil
# env (this is safe: a construction would fail in
# nogil anyway)
pass
else:
#self.args[i] = arg.coerce_to_temp(env)
# instead: issue a warning
if i > 0 or i == 1 and self.self is not None: # skip first arg
warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
break
self.args[:] = args
# Calc result type and code fragment
if isinstance(self.function, NewExprNode):
self.type = PyrexTypes.CPtrType(self.function.class_type)
else:
self.type = func_type.return_type
if self.function.is_name or self.function.is_attribute:
if self.function.entry and self.function.entry.utility_code:
self.is_temp = 1 # currently doesn't work for self.calculate_result_code()
if self.type.is_pyobject:
self.result_ctype = py_object_type
self.is_temp = 1
elif func_type.exception_value is not None \
or func_type.exception_check:
self.is_temp = 1
elif self.type.is_memoryviewslice:
self.is_temp = 1
# func_type.exception_check = True
# Called in 'nogil' context?
self.nogil = env.nogil
if (self.nogil and
func_type.exception_check and
func_type.exception_check != '+'):
env.use_utility_code(pyerr_occurred_withgil_utility_code)
# C++ exception handler
if func_type.exception_check == '+':
if func_type.exception_value is None:
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
def calculate_result_code(self):
return self.c_call_code()
def c_call_code(self):
func_type = self.function_type()
if self.type is PyrexTypes.error_type or not func_type.is_cfunction:
return "<error>"
formal_args = func_type.args
arg_list_code = []
args = list(zip(formal_args, self.args))
max_nargs = len(func_type.args)
expected_nargs = max_nargs - func_type.optional_arg_count
actual_nargs = len(self.args)
for formal_arg, actual_arg in args[:expected_nargs]:
arg_code = actual_arg.result_as(formal_arg.type)
arg_list_code.append(arg_code)
if func_type.is_overridable:
arg_list_code.append(str(int(self.wrapper_call or self.function.entry.is_unbound_cmethod)))
if func_type.optional_arg_count:
if expected_nargs == actual_nargs:
optional_args = 'NULL'
else:
optional_args = "&%s" % self.opt_arg_struct
arg_list_code.append(optional_args)
for actual_arg in self.args[len(formal_args):]:
arg_list_code.append(actual_arg.result())
result = "%s(%s)" % (self.function.result(), ', '.join(arg_list_code))
return result
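    # Illustrative example (hypothetical names, not from the original
    # source): a call to a cdef function with optional arguments may
    # render roughly as
    #     __pyx_f_4mymod_func(__pyx_v_x, &__pyx_opt_args)
    # where the optional-args struct pointer is appended after the
    # required arguments, as assembled above.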
def generate_result_code(self, code):
func_type = self.function_type()
if self.function.is_name or self.function.is_attribute:
if self.function.entry and self.function.entry.utility_code:
code.globalstate.use_utility_code(self.function.entry.utility_code)
if func_type.is_pyobject:
arg_code = self.arg_tuple.py_result()
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCall", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
self.result(),
self.function.py_result(),
arg_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif func_type.is_cfunction:
if self.has_optional_args:
actual_nargs = len(self.args)
expected_nargs = len(func_type.args) - func_type.optional_arg_count
self.opt_arg_struct = code.funcstate.allocate_temp(
func_type.op_arg_struct.base_type, manage_ref=True)
code.putln("%s.%s = %s;" % (
self.opt_arg_struct,
Naming.pyrex_prefix + "n",
len(self.args) - expected_nargs))
args = list(zip(func_type.args, self.args))
for formal_arg, actual_arg in args[expected_nargs:actual_nargs]:
code.putln("%s.%s = %s;" % (
self.opt_arg_struct,
func_type.opt_arg_cname(formal_arg.name),
actual_arg.result_as(formal_arg.type)))
exc_checks = []
if self.type.is_pyobject and self.is_temp:
exc_checks.append("!%s" % self.result())
elif self.type.is_memoryviewslice:
assert self.is_temp
exc_checks.append(self.type.error_condition(self.result()))
else:
exc_val = func_type.exception_value
exc_check = func_type.exception_check
if exc_val is not None:
exc_checks.append("%s == %s" % (self.result(), exc_val))
if exc_check:
if self.nogil:
exc_checks.append("__Pyx_ErrOccurredWithGIL()")
else:
exc_checks.append("PyErr_Occurred()")
if self.is_temp or exc_checks:
rhs = self.c_call_code()
if self.result():
lhs = "%s = " % self.result()
if self.is_temp and self.type.is_pyobject:
#return_type = self.type # func_type.return_type
#print "SimpleCallNode.generate_result_code: casting", rhs, \
# "from", return_type, "to pyobject" ###
rhs = typecast(py_object_type, self.type, rhs)
else:
lhs = ""
if func_type.exception_check == '+':
if func_type.exception_value is None:
raise_py_exception = "__Pyx_CppExn2PyErr();"
elif func_type.exception_value.type.is_pyobject:
raise_py_exception = 'try { throw; } catch(const std::exception& exn) { PyErr_SetString(%s, exn.what()); } catch(...) { PyErr_SetNone(%s); }' % (
func_type.exception_value.entry.cname,
func_type.exception_value.entry.cname)
else:
raise_py_exception = '%s(); if (!PyErr_Occurred()) PyErr_SetString(PyExc_RuntimeError , "Error converting c++ exception.");' % func_type.exception_value.entry.cname
code.putln("try {")
code.putln("%s%s;" % (lhs, rhs))
code.putln("} catch(...) {")
if self.nogil:
code.put_ensure_gil(declare_gilstate=True)
code.putln(raise_py_exception)
if self.nogil:
code.put_release_ensured_gil()
code.putln(code.error_goto(self.pos))
code.putln("}")
else:
if exc_checks:
goto_error = code.error_goto_if(" && ".join(exc_checks), self.pos)
else:
goto_error = ""
code.putln("%s%s; %s" % (lhs, rhs, goto_error))
if self.type.is_pyobject and self.result():
code.put_gotref(self.py_result())
if self.has_optional_args:
code.funcstate.release_temp(self.opt_arg_struct)
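# InlinedDefNodeCallNode bypasses the Python call protocol: when the
# target def function's signature matches the call exactly (no */**
# arguments and the same argument count, see can_be_inlined()), the
# function's C implementation is invoked directly.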
class InlinedDefNodeCallNode(CallNode):
# Inline call to defnode
#
# function PyCFunctionNode
# function_name NameNode
# args [ExprNode]
subexprs = ['args', 'function_name']
is_temp = 1
type = py_object_type
function = None
function_name = None
def can_be_inlined(self):
        func_type = self.function.def_node
if func_type.star_arg or func_type.starstar_arg:
return False
if len(func_type.args) != len(self.args):
return False
return True
def analyse_types(self, env):
self.function_name = self.function_name.analyse_types(env)
self.args = [ arg.analyse_types(env) for arg in self.args ]
func_type = self.function.def_node
actual_nargs = len(self.args)
# Coerce arguments
some_args_in_temps = False
for i in xrange(actual_nargs):
formal_type = func_type.args[i].type
arg = self.args[i].coerce_to(formal_type, env)
if arg.is_temp:
if i > 0:
# first argument in temp doesn't impact subsequent arguments
some_args_in_temps = True
elif arg.type.is_pyobject and not env.nogil:
if arg.nonlocally_immutable():
# plain local variables are ok
pass
else:
# we do not safely own the argument's reference,
# but we must make sure it cannot be collected
# before we return from the function, so we create
# an owned temp reference to it
if i > 0: # first argument doesn't matter
some_args_in_temps = True
arg = arg.coerce_to_temp(env)
self.args[i] = arg
if some_args_in_temps:
# if some args are temps and others are not, they may get
# constructed in the wrong order (temps first) => make
# sure they are either all temps or all not temps (except
# for the last argument, which is evaluated last in any
# case)
for i in xrange(actual_nargs-1):
arg = self.args[i]
if arg.nonlocally_immutable():
# locals, C functions, unassignable types are safe.
pass
elif arg.type.is_cpp_class:
# Assignment has side effects, avoid.
pass
elif env.nogil and arg.type.is_pyobject:
# can't copy a Python reference into a temp in nogil
# env (this is safe: a construction would fail in
# nogil anyway)
pass
else:
#self.args[i] = arg.coerce_to_temp(env)
# instead: issue a warning
if i > 0:
warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
break
return self
def generate_result_code(self, code):
arg_code = [self.function_name.py_result()]
func_type = self.function.def_node
for arg, proto_arg in zip(self.args, func_type.args):
if arg.type.is_pyobject:
arg_code.append(arg.result_as(proto_arg.type))
else:
arg_code.append(arg.result())
arg_code = ', '.join(arg_code)
code.putln(
"%s = %s(%s); %s" % (
self.result(),
self.function.def_node.entry.pyfunc_cname,
arg_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class PythonCapiFunctionNode(ExprNode):
subexprs = []
def __init__(self, pos, py_name, cname, func_type, utility_code = None):
ExprNode.__init__(self, pos, name=py_name, cname=cname,
type=func_type, utility_code=utility_code)
def analyse_types(self, env):
return self
def generate_result_code(self, code):
if self.utility_code:
code.globalstate.use_utility_code(self.utility_code)
def calculate_result_code(self):
return self.cname
class PythonCapiCallNode(SimpleCallNode):
# Python C-API Function call (only created in transforms)
# By default, we assume that the call never returns None, as this
# is true for most C-API functions in CPython. If this does not
# apply to a call, set the following to True (or None to inherit
# the default behaviour).
may_return_none = False
def __init__(self, pos, function_name, func_type,
utility_code = None, py_name=None, **kwargs):
self.type = func_type.return_type
self.result_ctype = self.type
self.function = PythonCapiFunctionNode(
pos, py_name, function_name, func_type,
utility_code = utility_code)
# call this last so that we can override the constructed
# attributes above with explicit keyword arguments if required
SimpleCallNode.__init__(self, pos, **kwargs)
class GeneralCallNode(CallNode):
# General Python function call, including keyword,
# * and ** arguments.
#
# function ExprNode
# positional_args ExprNode Tuple of positional arguments
# keyword_args ExprNode or None Dict of keyword arguments
type = py_object_type
subexprs = ['function', 'positional_args', 'keyword_args']
nogil_check = Node.gil_error
def compile_time_value(self, denv):
function = self.function.compile_time_value(denv)
positional_args = self.positional_args.compile_time_value(denv)
keyword_args = self.keyword_args.compile_time_value(denv)
try:
return function(*positional_args, **keyword_args)
except Exception, e:
self.compile_time_value_error(e)
def explicit_args_kwds(self):
if (self.keyword_args and not isinstance(self.keyword_args, DictNode) or
not isinstance(self.positional_args, TupleNode)):
raise CompileError(self.pos,
'Compile-time keyword arguments must be explicit.')
return self.positional_args.args, self.keyword_args
def analyse_types(self, env):
if self.analyse_as_type_constructor(env):
return self
self.function = self.function.analyse_types(env)
if not self.function.type.is_pyobject:
if self.function.type.is_error:
self.type = error_type
return self
if hasattr(self.function, 'entry'):
node = self.map_to_simple_call_node()
if node is not None and node is not self:
return node.analyse_types(env)
elif self.function.entry.as_variable:
self.function = self.function.coerce_to_pyobject(env)
elif node is self:
error(self.pos,
"Non-trivial keyword arguments and starred "
"arguments not allowed in cdef functions.")
else:
# error was already reported
pass
else:
self.function = self.function.coerce_to_pyobject(env)
if self.keyword_args:
self.keyword_args = self.keyword_args.analyse_types(env)
self.positional_args = self.positional_args.analyse_types(env)
self.positional_args = \
self.positional_args.coerce_to_pyobject(env)
function = self.function
if function.is_name and function.type_entry:
# We are calling an extension type constructor. As long
# as we do not support __new__(), the result type is clear
self.type = function.type_entry.type
self.result_ctype = py_object_type
self.may_return_none = False
else:
self.type = py_object_type
self.is_temp = 1
return self
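    # Illustrative example (not from the original source): for
    #     cdef int f(int a, int b, int c)
    # a call f(1, c=3, b=2) is rewritten by map_to_simple_call_node()
    # into the positional call f(1, 2, 3), moving non-simple keyword
    # values into temps to preserve their evaluation order.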
def map_to_simple_call_node(self):
"""
Tries to map keyword arguments to declared positional arguments.
Returns self to try a Python call, None to report an error
or a SimpleCallNode if the mapping succeeds.
"""
if not isinstance(self.positional_args, TupleNode):
# has starred argument
return self
if not isinstance(self.keyword_args, DictNode):
# keywords come from arbitrary expression => nothing to do here
return self
function = self.function
entry = getattr(function, 'entry', None)
if not entry:
return self
function_type = entry.type
if function_type.is_ptr:
function_type = function_type.base_type
if not function_type.is_cfunction:
return self
pos_args = self.positional_args.args
kwargs = self.keyword_args
declared_args = function_type.args
if entry.is_cmethod:
declared_args = declared_args[1:] # skip 'self'
if len(pos_args) > len(declared_args):
error(self.pos, "function call got too many positional arguments, "
"expected %d, got %s" % (len(declared_args),
len(pos_args)))
return None
matched_args = set([ arg.name for arg in declared_args[:len(pos_args)]
if arg.name ])
unmatched_args = declared_args[len(pos_args):]
matched_kwargs_count = 0
args = list(pos_args)
# check for duplicate keywords
seen = set(matched_args)
has_errors = False
for arg in kwargs.key_value_pairs:
name = arg.key.value
if name in seen:
error(arg.pos, "argument '%s' passed twice" % name)
has_errors = True
# continue to report more errors if there are any
seen.add(name)
# match keywords that are passed in order
for decl_arg, arg in zip(unmatched_args, kwargs.key_value_pairs):
name = arg.key.value
if decl_arg.name == name:
matched_args.add(name)
matched_kwargs_count += 1
args.append(arg.value)
else:
break
# match keyword arguments that are passed out-of-order, but keep
# the evaluation of non-simple arguments in order by moving them
# into temps
from Cython.Compiler.UtilNodes import EvalWithTempExprNode, LetRefNode
temps = []
if len(kwargs.key_value_pairs) > matched_kwargs_count:
unmatched_args = declared_args[len(args):]
keywords = dict([ (arg.key.value, (i+len(pos_args), arg))
for i, arg in enumerate(kwargs.key_value_pairs) ])
first_missing_keyword = None
for decl_arg in unmatched_args:
name = decl_arg.name
if name not in keywords:
# missing keyword argument => either done or error
if not first_missing_keyword:
first_missing_keyword = name
continue
elif first_missing_keyword:
if entry.as_variable:
# we might be able to convert the function to a Python
# object, which then allows full calling semantics
# with default values in gaps - currently, we only
# support optional arguments at the end
return self
# wasn't the last keyword => gaps are not supported
error(self.pos, "C function call is missing "
"argument '%s'" % first_missing_keyword)
return None
pos, arg = keywords[name]
matched_args.add(name)
matched_kwargs_count += 1
if arg.value.is_simple():
args.append(arg.value)
else:
temp = LetRefNode(arg.value)
assert temp.is_simple()
args.append(temp)
temps.append((pos, temp))
if temps:
# may have to move preceding non-simple args into temps
final_args = []
new_temps = []
first_temp_arg = temps[0][-1]
for arg_value in args:
if arg_value is first_temp_arg:
break # done
if arg_value.is_simple():
final_args.append(arg_value)
else:
temp = LetRefNode(arg_value)
new_temps.append(temp)
final_args.append(temp)
if new_temps:
args = final_args
temps = new_temps + [ arg for i,arg in sorted(temps) ]
# check for unexpected keywords
for arg in kwargs.key_value_pairs:
name = arg.key.value
if name not in matched_args:
has_errors = True
error(arg.pos,
"C function got unexpected keyword argument '%s'" %
name)
if has_errors:
# error was reported already
return None
# all keywords mapped to positional arguments
# if we are missing arguments, SimpleCallNode will figure it out
node = SimpleCallNode(self.pos, function=function, args=args)
for temp in temps[::-1]:
node = EvalWithTempExprNode(temp, node)
return node
def generate_result_code(self, code):
if self.type.is_error: return
if self.keyword_args:
kwargs = self.keyword_args.py_result()
else:
kwargs = 'NULL'
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCall", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_Call(%s, %s, %s); %s" % (
self.result(),
self.function.py_result(),
self.positional_args.py_result(),
kwargs,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class AsTupleNode(ExprNode):
# Convert argument to tuple. Used for normalising
# the * argument of a function call.
#
# arg ExprNode
subexprs = ['arg']
def calculate_constant_result(self):
self.constant_result = tuple(self.arg.constant_result)
def compile_time_value(self, denv):
arg = self.arg.compile_time_value(denv)
try:
return tuple(arg)
except Exception, e:
self.compile_time_value_error(e)
def analyse_types(self, env):
self.arg = self.arg.analyse_types(env)
self.arg = self.arg.coerce_to_pyobject(env)
self.type = tuple_type
self.is_temp = 1
return self
def may_be_none(self):
return False
nogil_check = Node.gil_error
gil_message = "Constructing Python tuple"
def generate_result_code(self, code):
code.putln(
"%s = PySequence_Tuple(%s); %s" % (
self.result(),
self.arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class AttributeNode(ExprNode):
# obj.attribute
#
# obj ExprNode
# attribute string
# needs_none_check boolean Used if obj is an extension type.
# If set to True, it is known that the type is not None.
#
# Used internally:
#
# is_py_attr boolean Is a Python getattr operation
# member string C name of struct member
# is_called boolean Function call is being done on result
# entry Entry Symbol table entry of attribute
is_attribute = 1
subexprs = ['obj']
type = PyrexTypes.error_type
entry = None
is_called = 0
needs_none_check = True
is_memslice_transpose = False
is_special_lookup = False
def as_cython_attribute(self):
if (isinstance(self.obj, NameNode) and
self.obj.is_cython_module and not
self.attribute == u"parallel"):
return self.attribute
cy = self.obj.as_cython_attribute()
if cy:
return "%s.%s" % (cy, self.attribute)
return None
def coerce_to(self, dst_type, env):
# If coercing to a generic pyobject and this is a cpdef function
# we can create the corresponding attribute
if dst_type is py_object_type:
entry = self.entry
if entry and entry.is_cfunction and entry.as_variable:
# must be a cpdef function
self.is_temp = 1
self.entry = entry.as_variable
self.analyse_as_python_attribute(env)
return self
return ExprNode.coerce_to(self, dst_type, env)
def calculate_constant_result(self):
attr = self.attribute
if attr.startswith("__") and attr.endswith("__"):
return
self.constant_result = getattr(self.obj.constant_result, attr)
def compile_time_value(self, denv):
attr = self.attribute
if attr.startswith("__") and attr.endswith("__"):
error(self.pos,
"Invalid attribute name '%s' in compile-time expression" % attr)
return None
obj = self.obj.compile_time_value(denv)
try:
return getattr(obj, attr)
except Exception, e:
self.compile_time_value_error(e)
def type_dependencies(self, env):
return self.obj.type_dependencies(env)
def infer_type(self, env):
# FIXME: this is way too redundant with analyse_types()
node = self.analyse_as_cimported_attribute_node(env, target=False)
if node is not None:
return node.entry.type
node = self.analyse_as_unbound_cmethod_node(env)
if node is not None:
return node.entry.type
obj_type = self.obj.infer_type(env)
self.analyse_attribute(env, obj_type=obj_type)
if obj_type.is_builtin_type and self.type.is_cfunction:
# special case: C-API replacements for C methods of
# builtin types cannot be inferred as C functions as
# that would prevent their use as bound methods
return py_object_type
return self.type
def analyse_target_declaration(self, env):
pass
def analyse_target_types(self, env):
node = self.analyse_types(env, target = 1)
if node.type.is_const:
error(self.pos, "Assignment to const attribute '%s'" % self.attribute)
if not node.is_lvalue():
error(self.pos, "Assignment to non-lvalue of type '%s'" % self.type)
return node
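    # Attribute resolution below tries the most specific interpretation
    # first: a cimported module attribute, then (for reads) an unbound
    # C method of an extension or builtin type, and finally an ordinary
    # (struct or Python) attribute lookup.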
def analyse_types(self, env, target = 0):
self.initialized_check = env.directives['initializedcheck']
node = self.analyse_as_cimported_attribute_node(env, target)
if node is None and not target:
node = self.analyse_as_unbound_cmethod_node(env)
if node is None:
node = self.analyse_as_ordinary_attribute_node(env, target)
assert node is not None
if node.entry:
node.entry.used = True
if node.is_attribute:
node.wrap_obj_in_nonecheck(env)
return node
def analyse_as_cimported_attribute_node(self, env, target):
# Try to interpret this as a reference to an imported
        # C const, type, var or function. If successful, returns
        # a NameNode built from this node, otherwise returns None.
module_scope = self.obj.analyse_as_module(env)
if module_scope:
entry = module_scope.lookup_here(self.attribute)
if entry and (
entry.is_cglobal or entry.is_cfunction
or entry.is_type or entry.is_const):
return self.as_name_node(env, entry, target)
return None
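    # Illustrative example (not from the original source): an unbound
    # call like SomeExtType.method(obj, arg) resolves here to the
    # method's C entry (through the vtable for extension types),
    # avoiding the construction of a bound method object.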
def analyse_as_unbound_cmethod_node(self, env):
# Try to interpret this as a reference to an unbound
# C method of an extension type or builtin type. If successful,
# creates a corresponding NameNode and returns it, otherwise
# returns None.
type = self.obj.analyse_as_extension_type(env)
if type:
entry = type.scope.lookup_here(self.attribute)
if entry and entry.is_cmethod:
if type.is_builtin_type:
if not self.is_called:
# must handle this as Python object
return None
ubcm_entry = entry
else:
# Create a temporary entry describing the C method
# as an ordinary function.
ubcm_entry = Symtab.Entry(entry.name,
"%s->%s" % (type.vtabptr_cname, entry.cname),
entry.type)
ubcm_entry.is_cfunction = 1
ubcm_entry.func_cname = entry.func_cname
ubcm_entry.is_unbound_cmethod = 1
return self.as_name_node(env, ubcm_entry, target=False)
return None
def analyse_as_type(self, env):
module_scope = self.obj.analyse_as_module(env)
if module_scope:
return module_scope.lookup_type(self.attribute)
if not self.obj.is_string_literal:
base_type = self.obj.analyse_as_type(env)
if base_type and hasattr(base_type, 'scope') and base_type.scope is not None:
return base_type.scope.lookup_type(self.attribute)
return None
def analyse_as_extension_type(self, env):
# Try to interpret this as a reference to an extension type
# in a cimported module. Returns the extension type, or None.
module_scope = self.obj.analyse_as_module(env)
if module_scope:
entry = module_scope.lookup_here(self.attribute)
if entry and entry.is_type:
if entry.type.is_extension_type or entry.type.is_builtin_type:
return entry.type
return None
def analyse_as_module(self, env):
# Try to interpret this as a reference to a cimported module
# in another cimported module. Returns the module scope, or None.
module_scope = self.obj.analyse_as_module(env)
if module_scope:
entry = module_scope.lookup_here(self.attribute)
if entry and entry.as_module:
return entry.as_module
return None
def as_name_node(self, env, entry, target):
# Create a corresponding NameNode from this node and complete the
# analyse_types phase.
node = NameNode.from_node(self, name=self.attribute, entry=entry)
if target:
node = node.analyse_target_types(env)
else:
node = node.analyse_rvalue_entry(env)
node.entry.used = 1
return node
def analyse_as_ordinary_attribute_node(self, env, target):
self.obj = self.obj.analyse_types(env)
self.analyse_attribute(env)
if self.entry and self.entry.is_cmethod and not self.is_called:
# error(self.pos, "C method can only be called")
pass
## Reference to C array turns into pointer to first element.
#while self.type.is_array:
# self.type = self.type.element_ptr_type()
if self.is_py_attr:
if not target:
self.is_temp = 1
self.result_ctype = py_object_type
elif target and self.obj.type.is_builtin_type:
error(self.pos, "Assignment to an immutable object field")
#elif self.type.is_memoryviewslice and not target:
# self.is_temp = True
return self
def analyse_attribute(self, env, obj_type = None):
# Look up attribute and set self.type and self.member.
immutable_obj = obj_type is not None # used during type inference
self.is_py_attr = 0
self.member = self.attribute
if obj_type is None:
if self.obj.type.is_string or self.obj.type.is_pyunicode_ptr:
self.obj = self.obj.coerce_to_pyobject(env)
obj_type = self.obj.type
else:
if obj_type.is_string or obj_type.is_pyunicode_ptr:
obj_type = py_object_type
if obj_type.is_ptr or obj_type.is_array:
obj_type = obj_type.base_type
self.op = "->"
elif obj_type.is_extension_type or obj_type.is_builtin_type:
self.op = "->"
else:
self.op = "."
if obj_type.has_attributes:
if obj_type.attributes_known():
if (obj_type.is_memoryviewslice and not
obj_type.scope.lookup_here(self.attribute)):
if self.attribute == 'T':
self.is_memslice_transpose = True
self.is_temp = True
self.use_managed_ref = True
self.type = self.obj.type
return
else:
obj_type.declare_attribute(self.attribute, env, self.pos)
entry = obj_type.scope.lookup_here(self.attribute)
if entry and entry.is_member:
entry = None
else:
error(self.pos,
"Cannot select attribute of incomplete type '%s'"
% obj_type)
self.type = PyrexTypes.error_type
return
self.entry = entry
if entry:
if obj_type.is_extension_type and entry.name == "__weakref__":
error(self.pos, "Illegal use of special attribute __weakref__")
# def methods need the normal attribute lookup
# because they do not have struct entries
            # fused functions go through assignment synthesis
# (foo = pycfunction(foo_func_obj)) and need to go through
# regular Python lookup as well
if (entry.is_variable and not entry.fused_cfunction) or entry.is_cmethod:
self.type = entry.type
self.member = entry.cname
return
else:
# If it's not a variable or C method, it must be a Python
# method of an extension type, so we treat it like a Python
# attribute.
pass
# If we get here, the base object is not a struct/union/extension
# type, or it is an extension type and the attribute is either not
# declared or is declared as a Python method. Treat it as a Python
# attribute reference.
self.analyse_as_python_attribute(env, obj_type, immutable_obj)
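    # Note: the private name mangling below mirrors CPython's rule,
    # e.g. an attribute written as "__attr" inside "class Foo" is
    # looked up as "_Foo__attr".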
def analyse_as_python_attribute(self, env, obj_type=None, immutable_obj=False):
if obj_type is None:
obj_type = self.obj.type
# mangle private '__*' Python attributes used inside of a class
self.attribute = env.mangle_class_private_name(self.attribute)
self.member = self.attribute
self.type = py_object_type
self.is_py_attr = 1
if not obj_type.is_pyobject and not obj_type.is_error:
if obj_type.can_coerce_to_pyobject(env):
if not immutable_obj:
self.obj = self.obj.coerce_to_pyobject(env)
elif (obj_type.is_cfunction and self.obj.is_name
and self.obj.entry.as_variable
and self.obj.entry.as_variable.type.is_pyobject):
# might be an optimised builtin function => unpack it
if not immutable_obj:
self.obj = self.obj.coerce_to_pyobject(env)
else:
error(self.pos,
"Object of type '%s' has no attribute '%s'" %
(obj_type, self.attribute))
def wrap_obj_in_nonecheck(self, env):
if not env.directives['nonecheck']:
return
msg = None
format_args = ()
if (self.obj.type.is_extension_type and self.needs_none_check and not
self.is_py_attr):
msg = "'NoneType' object has no attribute '%s'"
format_args = (self.attribute,)
elif self.obj.type.is_memoryviewslice:
if self.is_memslice_transpose:
msg = "Cannot transpose None memoryview slice"
else:
entry = self.obj.type.scope.lookup_here(self.attribute)
if entry:
# copy/is_c_contig/shape/strides etc
msg = "Cannot access '%s' attribute of None memoryview slice"
format_args = (entry.name,)
if msg:
self.obj = self.obj.as_none_safe_node(msg, 'PyExc_AttributeError',
format_args=format_args)
def nogil_check(self, env):
if self.is_py_attr:
self.gil_error()
elif self.type.is_memoryviewslice:
import MemoryView
MemoryView.err_if_nogil_initialized_check(self.pos, env, 'attribute')
gil_message = "Accessing Python attribute"
def is_simple(self):
if self.obj:
return self.result_in_temp() or self.obj.is_simple()
else:
return NameNode.is_simple(self)
def is_lvalue(self):
if self.obj:
return not self.type.is_array
else:
return NameNode.is_lvalue(self)
def is_ephemeral(self):
if self.obj:
return self.obj.is_ephemeral()
else:
return NameNode.is_ephemeral(self)
def calculate_result_code(self):
#print "AttributeNode.calculate_result_code:", self.member ###
#print "...obj node =", self.obj, "code", self.obj.result() ###
#print "...obj type", self.obj.type, "ctype", self.obj.ctype() ###
obj = self.obj
obj_code = obj.result_as(obj.type)
#print "...obj_code =", obj_code ###
if self.entry and self.entry.is_cmethod:
if obj.type.is_extension_type and not self.entry.is_builtin_cmethod:
if self.entry.final_func_cname:
return self.entry.final_func_cname
if self.type.from_fused:
# If the attribute was specialized through indexing, make
# sure to get the right fused name, as our entry was
# replaced by our parent index node
# (AnalyseExpressionsTransform)
self.member = self.entry.cname
return "((struct %s *)%s%s%s)->%s" % (
obj.type.vtabstruct_cname, obj_code, self.op,
obj.type.vtabslot_cname, self.member)
elif self.result_is_used:
return self.member
# Generating no code at all for unused access to optimised builtin
# methods fixes the problem that some optimisations only exist as
# macros, i.e. there is no function pointer to them, so we would
# generate invalid C code here.
return
elif obj.type.is_complex:
return "__Pyx_C%s(%s)" % (self.member.upper(), obj_code)
else:
if obj.type.is_builtin_type and self.entry and self.entry.is_variable:
# accessing a field of a builtin type, need to cast better than result_as() does
obj_code = obj.type.cast_code(obj.result(), to_object_struct = True)
return "%s%s%s" % (obj_code, self.op, self.member)
def generate_result_code(self, code):
if self.is_py_attr:
if self.is_special_lookup:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c"))
lookup_func_name = '__Pyx_PyObject_LookupSpecial'
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c"))
lookup_func_name = '__Pyx_PyObject_GetAttrStr'
code.putln(
'%s = %s(%s, %s); %s' % (
self.result(),
lookup_func_name,
self.obj.py_result(),
code.intern_identifier(self.attribute),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif self.type.is_memoryviewslice:
if self.is_memslice_transpose:
# transpose the slice
for access, packing in self.type.axes:
if access == 'ptr':
error(self.pos, "Transposing not supported for slices "
"with indirect dimensions")
return
code.putln("%s = %s;" % (self.result(), self.obj.result()))
if self.obj.is_name or (self.obj.is_attribute and
self.obj.is_memslice_transpose):
code.put_incref_memoryviewslice(self.result(), have_gil=True)
T = "__pyx_memslice_transpose(&%s) == 0"
code.putln(code.error_goto_if(T % self.result(), self.pos))
elif self.initialized_check:
code.putln(
'if (unlikely(!%s.memview)) {'
'PyErr_SetString(PyExc_AttributeError,'
'"Memoryview is not initialized");'
'%s'
'}' % (self.result(), code.error_goto(self.pos)))
else:
# result_code contains what is needed, but we may need to insert
# a check and raise an exception
if self.obj.type.is_extension_type:
pass
elif self.entry and self.entry.is_cmethod and self.entry.utility_code:
# C method implemented as function call with utility code
code.globalstate.use_utility_code(self.entry.utility_code)
def generate_disposal_code(self, code):
if self.is_temp and self.type.is_memoryviewslice and self.is_memslice_transpose:
# mirror condition for putting the memview incref here:
if self.obj.is_name or (self.obj.is_attribute and
self.obj.is_memslice_transpose):
code.put_xdecref_memoryviewslice(
self.result(), have_gil=True)
else:
ExprNode.generate_disposal_code(self, code)
def generate_assignment_code(self, rhs, code):
self.obj.generate_evaluation_code(code)
if self.is_py_attr:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
code.put_error_if_neg(self.pos,
'__Pyx_PyObject_SetAttrStr(%s, %s, %s)' % (
self.obj.py_result(),
code.intern_identifier(self.attribute),
rhs.py_result()))
rhs.generate_disposal_code(code)
rhs.free_temps(code)
elif self.obj.type.is_complex:
code.putln("__Pyx_SET_C%s(%s, %s);" % (
self.member.upper(),
self.obj.result_as(self.obj.type),
rhs.result_as(self.ctype())))
else:
select_code = self.result()
if self.type.is_pyobject and self.use_managed_ref:
rhs.make_owned_reference(code)
code.put_giveref(rhs.py_result())
code.put_gotref(select_code)
code.put_decref(select_code, self.ctype())
elif self.type.is_memoryviewslice:
import MemoryView
MemoryView.put_assign_to_memviewslice(
select_code, rhs, rhs.result(), self.type, code)
if not self.type.is_memoryviewslice:
code.putln(
"%s = %s;" % (
select_code,
rhs.result_as(self.ctype())))
#rhs.result()))
rhs.generate_post_assignment_code(code)
rhs.free_temps(code)
self.obj.generate_disposal_code(code)
self.obj.free_temps(code)
def generate_deletion_code(self, code, ignore_nonexisting=False):
self.obj.generate_evaluation_code(code)
if self.is_py_attr or (self.entry.scope.is_property_scope
and u'__del__' in self.entry.scope.entries):
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
code.put_error_if_neg(self.pos,
'__Pyx_PyObject_DelAttrStr(%s, %s)' % (
self.obj.py_result(),
code.intern_identifier(self.attribute)))
else:
error(self.pos, "Cannot delete C attribute of extension type")
self.obj.generate_disposal_code(code)
self.obj.free_temps(code)
def annotate(self, code):
if self.is_py_attr:
style, text = 'py_attr', 'python attribute (%s)'
else:
style, text = 'c_attr', 'c attribute (%s)'
code.annotate(self.pos, AnnotationItem(style, text % self.type, size=len(self.attribute)))
#-------------------------------------------------------------------
#
# Constructor nodes
#
#-------------------------------------------------------------------
class StarredTargetNode(ExprNode):
# A starred expression like "*a"
#
# This is only allowed in sequence assignment targets such as
#
# a, *b = (1,2,3,4) => a = 1 ; b = [2,3,4]
#
    # and will be removed during type analysis (or generate an error
    # if it's found in an unexpected place).
#
# target ExprNode
subexprs = ['target']
is_starred = 1
type = py_object_type
is_temp = 1
def __init__(self, pos, target):
ExprNode.__init__(self, pos)
self.target = target
def analyse_declarations(self, env):
error(self.pos, "can use starred expression only as assignment target")
self.target.analyse_declarations(env)
def analyse_types(self, env):
error(self.pos, "can use starred expression only as assignment target")
self.target = self.target.analyse_types(env)
self.type = self.target.type
return self
def analyse_target_declaration(self, env):
self.target.analyse_target_declaration(env)
def analyse_target_types(self, env):
self.target = self.target.analyse_target_types(env)
self.type = self.target.type
return self
def calculate_result_code(self):
return ""
def generate_result_code(self, code):
pass
class SequenceNode(ExprNode):
# Base class for list and tuple constructor nodes.
# Contains common code for performing sequence unpacking.
#
# args [ExprNode]
# unpacked_items [ExprNode] or None
# coerced_unpacked_items [ExprNode] or None
# mult_factor ExprNode the integer number of content repetitions ([1,2]*3)
subexprs = ['args', 'mult_factor']
is_sequence_constructor = 1
unpacked_items = None
mult_factor = None
slow = False # trade speed for code size (e.g. use PyTuple_Pack())
def compile_time_value_list(self, denv):
return [arg.compile_time_value(denv) for arg in self.args]
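    # Illustrative example (not from the original source): in
    #     a, *b, c = seq
    # the StarredTargetNode wrapping "b" is unwrapped below and "b"
    # itself is flagged with is_starred for the unpacking code.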
def replace_starred_target_node(self):
# replace a starred node in the targets by the contained expression
self.starred_assignment = False
args = []
for arg in self.args:
if arg.is_starred:
if self.starred_assignment:
error(arg.pos, "more than 1 starred expression in assignment")
self.starred_assignment = True
arg = arg.target
arg.is_starred = True
args.append(arg)
self.args = args
def analyse_target_declaration(self, env):
self.replace_starred_target_node()
for arg in self.args:
arg.analyse_target_declaration(env)
def analyse_types(self, env, skip_children=False):
for i in range(len(self.args)):
arg = self.args[i]
if not skip_children: arg = arg.analyse_types(env)
self.args[i] = arg.coerce_to_pyobject(env)
if self.mult_factor:
self.mult_factor = self.mult_factor.analyse_types(env)
if not self.mult_factor.type.is_int:
self.mult_factor = self.mult_factor.coerce_to_pyobject(env)
self.is_temp = 1
# not setting self.type here, subtypes do this
return self
def may_be_none(self):
return False
def analyse_target_types(self, env):
if self.mult_factor:
error(self.pos, "can't assign to multiplied sequence")
self.unpacked_items = []
self.coerced_unpacked_items = []
self.any_coerced_items = False
for i, arg in enumerate(self.args):
arg = self.args[i] = arg.analyse_target_types(env)
if arg.is_starred:
if not arg.type.assignable_from(Builtin.list_type):
error(arg.pos,
"starred target must have Python object (list) type")
if arg.type is py_object_type:
arg.type = Builtin.list_type
unpacked_item = PyTempNode(self.pos, env)
coerced_unpacked_item = unpacked_item.coerce_to(arg.type, env)
if unpacked_item is not coerced_unpacked_item:
self.any_coerced_items = True
self.unpacked_items.append(unpacked_item)
self.coerced_unpacked_items.append(coerced_unpacked_item)
self.type = py_object_type
return self
def generate_result_code(self, code):
self.generate_operation_code(code)
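    # Illustrative example (not from the original source): for a
    # multiplied literal like (a, b) * n, the packing code below emits
    # PyTuple_New(2 * n) followed by a C loop of PyTuple_SET_ITEM
    # calls, increfing each element once per repetition.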
def generate_sequence_packing_code(self, code, target=None, plain=False):
if target is None:
target = self.result()
size_factor = c_mult = ''
mult_factor = None
if self.mult_factor and not plain:
mult_factor = self.mult_factor
if mult_factor.type.is_int:
c_mult = mult_factor.result()
if isinstance(mult_factor.constant_result, (int,long)) \
and mult_factor.constant_result > 0:
size_factor = ' * %s' % mult_factor.constant_result
else:
size_factor = ' * ((%s<0) ? 0:%s)' % (c_mult, c_mult)
if self.type is Builtin.tuple_type and (self.is_literal or self.slow) and not c_mult:
# use PyTuple_Pack() to avoid generating huge amounts of one-time code
code.putln('%s = PyTuple_Pack(%d, %s); %s' % (
target,
len(self.args),
', '.join([ arg.py_result() for arg in self.args ]),
code.error_goto_if_null(target, self.pos)))
code.put_gotref(target)
else:
# build the tuple/list step by step, potentially multiplying it as we go
if self.type is Builtin.list_type:
create_func, set_item_func = 'PyList_New', 'PyList_SET_ITEM'
elif self.type is Builtin.tuple_type:
create_func, set_item_func = 'PyTuple_New', 'PyTuple_SET_ITEM'
else:
raise InternalError("sequence packing for unexpected type %s" % self.type)
arg_count = len(self.args)
code.putln("%s = %s(%s%s); %s" % (
target, create_func, arg_count, size_factor,
code.error_goto_if_null(target, self.pos)))
code.put_gotref(target)
if c_mult:
# FIXME: can't use a temp variable here as the code may
# end up in the constant building function. Temps
# currently don't work there.
#counter = code.funcstate.allocate_temp(mult_factor.type, manage_ref=False)
counter = Naming.quick_temp_cname
code.putln('{ Py_ssize_t %s;' % counter)
if arg_count == 1:
offset = counter
else:
offset = '%s * %s' % (counter, arg_count)
code.putln('for (%s=0; %s < %s; %s++) {' % (
counter, counter, c_mult, counter
))
else:
offset = ''
for i in xrange(arg_count):
arg = self.args[i]
if c_mult or not arg.result_in_temp():
code.put_incref(arg.result(), arg.ctype())
code.putln("%s(%s, %s, %s);" % (
set_item_func,
target,
(offset and i) and ('%s + %s' % (offset, i)) or (offset or i),
arg.py_result()))
code.put_giveref(arg.py_result())
if c_mult:
code.putln('}')
#code.funcstate.release_temp(counter)
code.putln('}')
if mult_factor is not None and mult_factor.type.is_pyobject:
code.putln('{ PyObject* %s = PyNumber_InPlaceMultiply(%s, %s); %s' % (
Naming.quick_temp_cname, target, mult_factor.py_result(),
code.error_goto_if_null(Naming.quick_temp_cname, self.pos)
))
code.put_gotref(Naming.quick_temp_cname)
code.put_decref(target, py_object_type)
code.putln('%s = %s;' % (target, Naming.quick_temp_cname))
code.putln('}')
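    # Rough sketch of the C code emitted above for a plain two-item
    # list (temp names are illustrative, not what Cython would pick):
    #
    #     __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) goto error;
    #     Py_INCREF(u);                      /* non-temp args get an incref */
    #     PyList_SET_ITEM(__pyx_t_1, 0, u);  /* reference-stealing store */
    #     PyList_SET_ITEM(__pyx_t_1, 1, v);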
def generate_subexpr_disposal_code(self, code):
if self.mult_factor and self.mult_factor.type.is_int:
super(SequenceNode, self).generate_subexpr_disposal_code(code)
elif self.type is Builtin.tuple_type and (self.is_literal or self.slow):
super(SequenceNode, self).generate_subexpr_disposal_code(code)
else:
# We call generate_post_assignment_code here instead
# of generate_disposal_code, because values were stored
# in the tuple using a reference-stealing operation.
for arg in self.args:
arg.generate_post_assignment_code(code)
# Should NOT call free_temps -- this is invoked by the default
# generate_evaluation_code which will do that.
if self.mult_factor:
self.mult_factor.generate_disposal_code(code)
def generate_assignment_code(self, rhs, code):
if self.starred_assignment:
self.generate_starred_assignment_code(rhs, code)
else:
self.generate_parallel_assignment_code(rhs, code)
for item in self.unpacked_items:
item.release(code)
rhs.free_temps(code)
_func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
]))
def generate_parallel_assignment_code(self, rhs, code):
# Need to work around the fact that generate_evaluation_code
# allocates the temps in a rather hacky way -- the assignment
# is evaluated twice, within each if-block.
for item in self.unpacked_items:
item.allocate(code)
special_unpack = (rhs.type is py_object_type
or rhs.type in (tuple_type, list_type)
or not rhs.type.is_builtin_type)
long_enough_for_a_loop = len(self.unpacked_items) > 3
if special_unpack:
self.generate_special_parallel_unpacking_code(
code, rhs, use_loop=long_enough_for_a_loop)
else:
code.putln("{")
self.generate_generic_parallel_unpacking_code(
code, rhs, self.unpacked_items, use_loop=long_enough_for_a_loop)
code.putln("}")
for value_node in self.coerced_unpacked_items:
value_node.generate_evaluation_code(code)
for i in range(len(self.args)):
self.args[i].generate_assignment_code(
self.coerced_unpacked_items[i], code)
def generate_special_parallel_unpacking_code(self, code, rhs, use_loop):
sequence_type_test = '1'
none_check = "likely(%s != Py_None)" % rhs.py_result()
if rhs.type is list_type:
sequence_types = ['List']
if rhs.may_be_none():
sequence_type_test = none_check
elif rhs.type is tuple_type:
sequence_types = ['Tuple']
if rhs.may_be_none():
sequence_type_test = none_check
else:
sequence_types = ['Tuple', 'List']
tuple_check = 'likely(PyTuple_CheckExact(%s))' % rhs.py_result()
list_check = 'PyList_CheckExact(%s)' % rhs.py_result()
sequence_type_test = "(%s) || (%s)" % (tuple_check, list_check)
code.putln("if (%s) {" % sequence_type_test)
code.putln("PyObject* sequence = %s;" % rhs.py_result())
# list/tuple => check size
code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
code.putln("Py_ssize_t size = Py_SIZE(sequence);")
code.putln("#else")
code.putln("Py_ssize_t size = PySequence_Size(sequence);") # < 0 => exception
code.putln("#endif")
code.putln("if (unlikely(size != %d)) {" % len(self.args))
code.globalstate.use_utility_code(raise_too_many_values_to_unpack)
code.putln("if (size > %d) __Pyx_RaiseTooManyValuesError(%d);" % (
len(self.args), len(self.args)))
code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
code.putln("else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);")
code.putln(code.error_goto(self.pos))
code.putln("}")
code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
# unpack items from list/tuple in unrolled loop (can't fail)
if len(sequence_types) == 2:
code.putln("if (likely(Py%s_CheckExact(sequence))) {" % sequence_types[0])
for i, item in enumerate(self.unpacked_items):
code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
item.result(), sequence_types[0], i))
if len(sequence_types) == 2:
code.putln("} else {")
for i, item in enumerate(self.unpacked_items):
code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
item.result(), sequence_types[1], i))
code.putln("}")
for item in self.unpacked_items:
code.put_incref(item.result(), item.ctype())
code.putln("#else")
# in non-CPython, use the PySequence protocol (which can fail)
if not use_loop:
for i, item in enumerate(self.unpacked_items):
code.putln("%s = PySequence_ITEM(sequence, %d); %s" % (
item.result(), i,
code.error_goto_if_null(item.result(), self.pos)))
code.put_gotref(item.result())
else:
code.putln("{")
code.putln("Py_ssize_t i;")
code.putln("PyObject** temps[%s] = {%s};" % (
len(self.unpacked_items),
','.join(['&%s' % item.result() for item in self.unpacked_items])))
code.putln("for (i=0; i < %s; i++) {" % len(self.unpacked_items))
code.putln("PyObject* item = PySequence_ITEM(sequence, i); %s" % (
code.error_goto_if_null('item', self.pos)))
code.put_gotref('item')
code.putln("*(temps[i]) = item;")
code.putln("}")
code.putln("}")
code.putln("#endif")
rhs.generate_disposal_code(code)
if sequence_type_test == '1':
code.putln("}") # all done
elif sequence_type_test == none_check:
# either tuple/list or None => save some code by generating the error directly
code.putln("} else {")
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseNoneIterError", "ObjectHandling.c"))
code.putln("__Pyx_RaiseNoneNotIterableError(); %s" % code.error_goto(self.pos))
code.putln("}") # all done
else:
code.putln("} else {") # needs iteration fallback code
self.generate_generic_parallel_unpacking_code(
code, rhs, self.unpacked_items, use_loop=use_loop)
code.putln("}")
def generate_generic_parallel_unpacking_code(self, code, rhs, unpacked_items, use_loop, terminate=True):
code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
code.globalstate.use_utility_code(UtilityCode.load_cached("IterFinish", "ObjectHandling.c"))
code.putln("Py_ssize_t index = -1;") # must be at the start of a C block!
if use_loop:
code.putln("PyObject** temps[%s] = {%s};" % (
len(self.unpacked_items),
','.join(['&%s' % item.result() for item in unpacked_items])))
iterator_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln(
"%s = PyObject_GetIter(%s); %s" % (
iterator_temp,
rhs.py_result(),
code.error_goto_if_null(iterator_temp, self.pos)))
code.put_gotref(iterator_temp)
rhs.generate_disposal_code(code)
iternext_func = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
code.putln("%s = Py_TYPE(%s)->tp_iternext;" % (
iternext_func, iterator_temp))
unpacking_error_label = code.new_label('unpacking_failed')
unpack_code = "%s(%s)" % (iternext_func, iterator_temp)
if use_loop:
code.putln("for (index=0; index < %s; index++) {" % len(unpacked_items))
code.put("PyObject* item = %s; if (unlikely(!item)) " % unpack_code)
code.put_goto(unpacking_error_label)
code.put_gotref("item")
code.putln("*(temps[index]) = item;")
code.putln("}")
else:
for i, item in enumerate(unpacked_items):
code.put(
"index = %d; %s = %s; if (unlikely(!%s)) " % (
i,
item.result(),
unpack_code,
item.result()))
code.put_goto(unpacking_error_label)
code.put_gotref(item.py_result())
if terminate:
code.globalstate.use_utility_code(
UtilityCode.load_cached("UnpackItemEndCheck", "ObjectHandling.c"))
code.put_error_if_neg(self.pos, "__Pyx_IternextUnpackEndCheck(%s, %d)" % (
unpack_code,
len(unpacked_items)))
code.putln("%s = NULL;" % iternext_func)
code.put_decref_clear(iterator_temp, py_object_type)
unpacking_done_label = code.new_label('unpacking_done')
code.put_goto(unpacking_done_label)
code.put_label(unpacking_error_label)
code.put_decref_clear(iterator_temp, py_object_type)
code.putln("%s = NULL;" % iternext_func)
code.putln("if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);")
code.putln(code.error_goto(self.pos))
code.put_label(unpacking_done_label)
code.funcstate.release_temp(iternext_func)
if terminate:
code.funcstate.release_temp(iterator_temp)
iterator_temp = None
return iterator_temp
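    # Simplified shape of the generic iterator fallback generated above
    # for two targets (labels and temps are illustrative):
    #
    #     iter = PyObject_GetIter(rhs);
    #     iternext = Py_TYPE(iter)->tp_iternext;
    #     index = 0; a = iternext(iter); if (unlikely(!a)) goto unpacking_failed;
    #     index = 1; b = iternext(iter); if (unlikely(!b)) goto unpacking_failed;
    #     if (__Pyx_IternextUnpackEndCheck(iternext(iter), 2) < 0) goto error;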
def generate_starred_assignment_code(self, rhs, code):
for i, arg in enumerate(self.args):
if arg.is_starred:
starred_target = self.unpacked_items[i]
unpacked_fixed_items_left = self.unpacked_items[:i]
unpacked_fixed_items_right = self.unpacked_items[i+1:]
break
else:
assert False
iterator_temp = None
if unpacked_fixed_items_left:
for item in unpacked_fixed_items_left:
item.allocate(code)
code.putln('{')
iterator_temp = self.generate_generic_parallel_unpacking_code(
code, rhs, unpacked_fixed_items_left,
use_loop=True, terminate=False)
for i, item in enumerate(unpacked_fixed_items_left):
value_node = self.coerced_unpacked_items[i]
value_node.generate_evaluation_code(code)
code.putln('}')
starred_target.allocate(code)
target_list = starred_target.result()
code.putln("%s = PySequence_List(%s); %s" % (
target_list,
iterator_temp or rhs.py_result(),
code.error_goto_if_null(target_list, self.pos)))
code.put_gotref(target_list)
if iterator_temp:
code.put_decref_clear(iterator_temp, py_object_type)
code.funcstate.release_temp(iterator_temp)
else:
rhs.generate_disposal_code(code)
if unpacked_fixed_items_right:
code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
length_temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
code.putln('%s = PyList_GET_SIZE(%s);' % (length_temp, target_list))
code.putln("if (unlikely(%s < %d)) {" % (length_temp, len(unpacked_fixed_items_right)))
code.putln("__Pyx_RaiseNeedMoreValuesError(%d+%s); %s" % (
len(unpacked_fixed_items_left), length_temp,
code.error_goto(self.pos)))
code.putln('}')
for item in unpacked_fixed_items_right[::-1]:
item.allocate(code)
for i, (item, coerced_arg) in enumerate(zip(unpacked_fixed_items_right[::-1],
self.coerced_unpacked_items[::-1])):
code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
code.putln("%s = PyList_GET_ITEM(%s, %s-%d); " % (
item.py_result(), target_list, length_temp, i+1))
# resize the list the hard way
code.putln("((PyVarObject*)%s)->ob_size--;" % target_list)
code.putln('#else')
code.putln("%s = PySequence_ITEM(%s, %s-%d); " % (
item.py_result(), target_list, length_temp, i+1))
code.putln('#endif')
code.put_gotref(item.py_result())
coerced_arg.generate_evaluation_code(code)
code.putln('#if !CYTHON_COMPILING_IN_CPYTHON')
sublist_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln('%s = PySequence_GetSlice(%s, 0, %s-%d); %s' % (
sublist_temp, target_list, length_temp, len(unpacked_fixed_items_right),
code.error_goto_if_null(sublist_temp, self.pos)))
code.put_gotref(sublist_temp)
code.funcstate.release_temp(length_temp)
code.put_decref(target_list, py_object_type)
code.putln('%s = %s; %s = NULL;' % (target_list, sublist_temp, sublist_temp))
code.putln('#else')
code.putln('%s = %s;' % (sublist_temp, sublist_temp)) # avoid warning about unused variable
code.funcstate.release_temp(sublist_temp)
code.putln('#endif')
for i, arg in enumerate(self.args):
arg.generate_assignment_code(self.coerced_unpacked_items[i], code)
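    # For "a, *b, c = rhs" the code above works in three steps:
    #   1. pull one item per target left of the star from an iterator,
    #   2. collect everything that remains into a fresh list via
    #      PySequence_List() -- this becomes the starred target,
    #   3. pop the targets right of the star back off the end of that
    #      list (shrinking it in place in CPython).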
def annotate(self, code):
for arg in self.args:
arg.annotate(code)
if self.unpacked_items:
for arg in self.unpacked_items:
arg.annotate(code)
for arg in self.coerced_unpacked_items:
arg.annotate(code)
class TupleNode(SequenceNode):
# Tuple constructor.
type = tuple_type
is_partly_literal = False
gil_message = "Constructing Python tuple"
def analyse_types(self, env, skip_children=False):
if len(self.args) == 0:
node = self
node.is_temp = False
node.is_literal = True
else:
node = SequenceNode.analyse_types(self, env, skip_children)
for child in node.args:
if not child.is_literal:
break
else:
if not node.mult_factor or node.mult_factor.is_literal and \
isinstance(node.mult_factor.constant_result, (int, long)):
node.is_temp = False
node.is_literal = True
else:
if not node.mult_factor.type.is_pyobject:
node.mult_factor = node.mult_factor.coerce_to_pyobject(env)
node.is_temp = True
node.is_partly_literal = True
return node
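    # Example: a fully literal tuple like "(1, 2, 3)" is built once and
    # cached as a module-level constant, while "(1, 2) * n" with a
    # runtime n is only partly literal: the constant part is cached and
    # multiplied at runtime (see generate_operation_code below).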
def is_simple(self):
# either temp or constant => always simple
return True
def nonlocally_immutable(self):
# either temp or constant => always safe
return True
def calculate_result_code(self):
if len(self.args) > 0:
return self.result_code
else:
return Naming.empty_tuple
def calculate_constant_result(self):
self.constant_result = tuple([
arg.constant_result for arg in self.args])
def compile_time_value(self, denv):
values = self.compile_time_value_list(denv)
try:
return tuple(values)
except Exception, e:
self.compile_time_value_error(e)
def generate_operation_code(self, code):
if len(self.args) == 0:
# result_code is Naming.empty_tuple
return
if self.is_partly_literal:
# underlying tuple is const, but factor is not
tuple_target = code.get_py_const(py_object_type, 'tuple', cleanup_level=2)
const_code = code.get_cached_constants_writer()
const_code.mark_pos(self.pos)
self.generate_sequence_packing_code(const_code, tuple_target, plain=True)
const_code.put_giveref(tuple_target)
code.putln('%s = PyNumber_Multiply(%s, %s); %s' % (
self.result(), tuple_target, self.mult_factor.py_result(),
code.error_goto_if_null(self.result(), self.pos)
))
code.put_gotref(self.py_result())
elif self.is_literal:
# non-empty cached tuple => result is global constant,
# creation code goes into separate code writer
self.result_code = code.get_py_const(py_object_type, 'tuple', cleanup_level=2)
code = code.get_cached_constants_writer()
code.mark_pos(self.pos)
self.generate_sequence_packing_code(code)
code.put_giveref(self.py_result())
else:
self.generate_sequence_packing_code(code)
class ListNode(SequenceNode):
# List constructor.
# obj_conversion_errors [PyrexError] used internally
    # original_args [ExprNode] used internally
obj_conversion_errors = []
type = list_type
in_module_scope = False
gil_message = "Constructing Python list"
def type_dependencies(self, env):
return ()
def infer_type(self, env):
        # TODO: Infer non-object list arrays.
return list_type
def analyse_expressions(self, env):
node = SequenceNode.analyse_expressions(self, env)
return node.coerce_to_pyobject(env)
def analyse_types(self, env):
hold_errors()
self.original_args = list(self.args)
node = SequenceNode.analyse_types(self, env)
node.obj_conversion_errors = held_errors()
release_errors(ignore=True)
if env.is_module_scope:
self.in_module_scope = True
return node
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject:
for err in self.obj_conversion_errors:
report_error(err)
self.obj_conversion_errors = []
if not self.type.subtype_of(dst_type):
error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
elif self.mult_factor:
error(self.pos, "Cannot coerce multiplied list to '%s'" % dst_type)
elif dst_type.is_ptr and dst_type.base_type is not PyrexTypes.c_void_type:
base_type = dst_type.base_type
self.type = PyrexTypes.CArrayType(base_type, len(self.args))
for i in range(len(self.original_args)):
arg = self.args[i]
if isinstance(arg, CoerceToPyTypeNode):
arg = arg.arg
self.args[i] = arg.coerce_to(base_type, env)
elif dst_type.is_struct:
if len(self.args) > len(dst_type.scope.var_entries):
error(self.pos, "Too may members for '%s'" % dst_type)
else:
if len(self.args) < len(dst_type.scope.var_entries):
warning(self.pos, "Too few members for '%s'" % dst_type, 1)
for i, (arg, member) in enumerate(zip(self.original_args, dst_type.scope.var_entries)):
if isinstance(arg, CoerceToPyTypeNode):
arg = arg.arg
self.args[i] = arg.coerce_to(member.type, env)
self.type = dst_type
else:
self.type = error_type
error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
return self
def as_tuple(self):
t = TupleNode(self.pos, args=self.args, mult_factor=self.mult_factor)
if isinstance(self.constant_result, list):
t.constant_result = tuple(self.constant_result)
return t
def allocate_temp_result(self, code):
if self.type.is_array and self.in_module_scope:
self.temp_code = code.funcstate.allocate_temp(
self.type, manage_ref=False, static=True)
else:
SequenceNode.allocate_temp_result(self, code)
def release_temp_result(self, env):
if self.type.is_array:
# To be valid C++, we must allocate the memory on the stack
# manually and be sure not to reuse it for something else.
pass
else:
SequenceNode.release_temp_result(self, env)
def calculate_constant_result(self):
if self.mult_factor:
raise ValueError() # may exceed the compile time memory
self.constant_result = [
arg.constant_result for arg in self.args]
def compile_time_value(self, denv):
l = self.compile_time_value_list(denv)
if self.mult_factor:
l *= self.mult_factor.compile_time_value(denv)
return l
def generate_operation_code(self, code):
if self.type.is_pyobject:
for err in self.obj_conversion_errors:
report_error(err)
self.generate_sequence_packing_code(code)
elif self.type.is_array:
for i, arg in enumerate(self.args):
code.putln("%s[%s] = %s;" % (
self.result(),
i,
arg.result()))
elif self.type.is_struct:
for arg, member in zip(self.args, self.type.scope.var_entries):
code.putln("%s.%s = %s;" % (
self.result(),
member.cname,
arg.result()))
else:
raise InternalError("List type never specified")
class ScopedExprNode(ExprNode):
# Abstract base class for ExprNodes that have their own local
# scope, such as generator expressions.
#
# expr_scope Scope the inner scope of the expression
subexprs = []
expr_scope = None
    # Does this node really have a local scope, i.e. does it keep its
    # loop variables from leaking? The non-leaking Py3 behaviour is the
    # default, except for list comprehensions, where the behaviour
    # differs between Py2 and Py3 (set in Parsing.py based on the
    # parser context).
has_local_scope = True
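    # Example of the difference: in Py2, "[x for x in seq]" leaks its
    # loop variable into the enclosing scope, while generator
    # expressions and all Py3 comprehensions keep it local.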
def init_scope(self, outer_scope, expr_scope=None):
if expr_scope is not None:
self.expr_scope = expr_scope
elif self.has_local_scope:
self.expr_scope = Symtab.GeneratorExpressionScope(outer_scope)
else:
self.expr_scope = None
def analyse_declarations(self, env):
self.init_scope(env)
def analyse_scoped_declarations(self, env):
# this is called with the expr_scope as env
pass
def analyse_types(self, env):
# no recursion here, the children will be analysed separately below
return self
def analyse_scoped_expressions(self, env):
# this is called with the expr_scope as env
return self
def generate_evaluation_code(self, code):
# set up local variables and free their references on exit
generate_inner_evaluation_code = super(ScopedExprNode, self).generate_evaluation_code
if not self.has_local_scope or not self.expr_scope.var_entries:
# no local variables => delegate, done
generate_inner_evaluation_code(code)
return
code.putln('{ /* enter inner scope */')
py_entries = []
for entry in self.expr_scope.var_entries:
if not entry.in_closure:
code.put_var_declaration(entry)
if entry.type.is_pyobject and entry.used:
py_entries.append(entry)
if not py_entries:
# no local Python references => no cleanup required
generate_inner_evaluation_code(code)
code.putln('} /* exit inner scope */')
return
# must free all local Python references at each exit point
old_loop_labels = tuple(code.new_loop_labels())
old_error_label = code.new_error_label()
generate_inner_evaluation_code(code)
# normal (non-error) exit
for entry in py_entries:
code.put_var_decref(entry)
# error/loop body exit points
exit_scope = code.new_label('exit_scope')
code.put_goto(exit_scope)
for label, old_label in ([(code.error_label, old_error_label)] +
list(zip(code.get_loop_labels(), old_loop_labels))):
if code.label_used(label):
code.put_label(label)
for entry in py_entries:
code.put_var_decref(entry)
code.put_goto(old_label)
code.put_label(exit_scope)
code.putln('} /* exit inner scope */')
code.set_loop_labels(old_loop_labels)
code.error_label = old_error_label
class ComprehensionNode(ScopedExprNode):
# A list/set/dict comprehension
child_attrs = ["loop"]
is_temp = True
def infer_type(self, env):
return self.type
def analyse_declarations(self, env):
self.append.target = self # this is used in the PyList_Append of the inner loop
self.init_scope(env)
def analyse_scoped_declarations(self, env):
self.loop.analyse_declarations(env)
def analyse_types(self, env):
if not self.has_local_scope:
self.loop = self.loop.analyse_expressions(env)
return self
def analyse_scoped_expressions(self, env):
if self.has_local_scope:
self.loop = self.loop.analyse_expressions(env)
return self
def may_be_none(self):
return False
def generate_result_code(self, code):
self.generate_operation_code(code)
def generate_operation_code(self, code):
if self.type is Builtin.list_type:
create_code = 'PyList_New(0)'
elif self.type is Builtin.set_type:
create_code = 'PySet_New(NULL)'
elif self.type is Builtin.dict_type:
create_code = 'PyDict_New()'
else:
raise InternalError("illegal type for comprehension: %s" % self.type)
code.putln('%s = %s; %s' % (
self.result(), create_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
self.loop.generate_execution_code(code)
def annotate(self, code):
self.loop.annotate(code)
class ComprehensionAppendNode(Node):
# Need to be careful to avoid infinite recursion:
# target must not be in child_attrs/subexprs
child_attrs = ['expr']
target = None
type = PyrexTypes.c_int_type
def analyse_expressions(self, env):
self.expr = self.expr.analyse_expressions(env)
if not self.expr.type.is_pyobject:
self.expr = self.expr.coerce_to_pyobject(env)
return self
def generate_execution_code(self, code):
if self.target.type is list_type:
code.globalstate.use_utility_code(
UtilityCode.load_cached("ListCompAppend", "Optimize.c"))
function = "__Pyx_ListComp_Append"
elif self.target.type is set_type:
function = "PySet_Add"
else:
raise InternalError(
"Invalid type for comprehension node: %s" % self.target.type)
self.expr.generate_evaluation_code(code)
code.putln(code.error_goto_if("%s(%s, (PyObject*)%s)" % (
function,
self.target.result(),
self.expr.result()
), self.pos))
self.expr.generate_disposal_code(code)
self.expr.free_temps(code)
def generate_function_definitions(self, env, code):
self.expr.generate_function_definitions(env, code)
def annotate(self, code):
self.expr.annotate(code)
class DictComprehensionAppendNode(ComprehensionAppendNode):
child_attrs = ['key_expr', 'value_expr']
def analyse_expressions(self, env):
self.key_expr = self.key_expr.analyse_expressions(env)
if not self.key_expr.type.is_pyobject:
self.key_expr = self.key_expr.coerce_to_pyobject(env)
self.value_expr = self.value_expr.analyse_expressions(env)
if not self.value_expr.type.is_pyobject:
self.value_expr = self.value_expr.coerce_to_pyobject(env)
return self
def generate_execution_code(self, code):
self.key_expr.generate_evaluation_code(code)
self.value_expr.generate_evaluation_code(code)
code.putln(code.error_goto_if("PyDict_SetItem(%s, (PyObject*)%s, (PyObject*)%s)" % (
self.target.result(),
self.key_expr.result(),
self.value_expr.result()
), self.pos))
self.key_expr.generate_disposal_code(code)
self.key_expr.free_temps(code)
self.value_expr.generate_disposal_code(code)
self.value_expr.free_temps(code)
def generate_function_definitions(self, env, code):
self.key_expr.generate_function_definitions(env, code)
self.value_expr.generate_function_definitions(env, code)
def annotate(self, code):
self.key_expr.annotate(code)
self.value_expr.annotate(code)
class InlinedGeneratorExpressionNode(ScopedExprNode):
# An inlined generator expression for which the result is
# calculated inside of the loop. This will only be created by
# transforms when replacing builtin calls on generator
# expressions.
#
# loop ForStatNode the for-loop, not containing any YieldExprNodes
# result_node ResultRefNode the reference to the result value temp
# orig_func String the name of the builtin function this node replaces
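    # Example: a transform may replace "sum(x*x for x in data)" with
    # this node, so the running total lives in result_node and is
    # updated directly inside the loop instead of going through a
    # generator object.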
child_attrs = ["loop"]
loop_analysed = False
type = py_object_type
def analyse_scoped_declarations(self, env):
self.loop.analyse_declarations(env)
def may_be_none(self):
return False
def annotate(self, code):
self.loop.annotate(code)
def infer_type(self, env):
return self.result_node.infer_type(env)
def analyse_types(self, env):
if not self.has_local_scope:
self.loop_analysed = True
self.loop = self.loop.analyse_expressions(env)
self.type = self.result_node.type
self.is_temp = True
return self
def analyse_scoped_expressions(self, env):
self.loop_analysed = True
if self.has_local_scope:
self.loop = self.loop.analyse_expressions(env)
return self
def coerce_to(self, dst_type, env):
if self.orig_func == 'sum' and dst_type.is_numeric and not self.loop_analysed:
            # We can optimise by dropping the aggregation variable and
            # the add operations into C. This can only be done safely
            # before analysing the loop body; after that, the result
            # reference type will already have propagated into the
            # loop's expressions and assignments.
self.result_node.type = self.type = dst_type
return self
return super(InlinedGeneratorExpressionNode, self).coerce_to(dst_type, env)
def generate_result_code(self, code):
self.result_node.result_code = self.result()
self.loop.generate_execution_code(code)
class SetNode(ExprNode):
# Set constructor.
type = set_type
subexprs = ['args']
gil_message = "Constructing Python set"
def analyse_types(self, env):
for i in range(len(self.args)):
arg = self.args[i]
arg = arg.analyse_types(env)
self.args[i] = arg.coerce_to_pyobject(env)
self.type = set_type
self.is_temp = 1
return self
def may_be_none(self):
return False
def calculate_constant_result(self):
self.constant_result = set([
arg.constant_result for arg in self.args])
def compile_time_value(self, denv):
values = [arg.compile_time_value(denv) for arg in self.args]
try:
return set(values)
except Exception, e:
self.compile_time_value_error(e)
def generate_evaluation_code(self, code):
code.globalstate.use_utility_code(Builtin.py_set_utility_code)
self.allocate_temp_result(code)
code.putln(
"%s = PySet_New(0); %s" % (
self.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
for arg in self.args:
arg.generate_evaluation_code(code)
code.put_error_if_neg(
self.pos,
"PySet_Add(%s, %s)" % (self.result(), arg.py_result()))
arg.generate_disposal_code(code)
arg.free_temps(code)
class DictNode(ExprNode):
# Dictionary constructor.
#
# key_value_pairs [DictItemNode]
# exclude_null_values [boolean] Do not add NULL values to dict
#
# obj_conversion_errors [PyrexError] used internally
subexprs = ['key_value_pairs']
is_temp = 1
exclude_null_values = False
type = dict_type
obj_conversion_errors = []
@classmethod
def from_pairs(cls, pos, pairs):
return cls(pos, key_value_pairs=[
DictItemNode(pos, key=k, value=v) for k, v in pairs])
def calculate_constant_result(self):
self.constant_result = dict([
item.constant_result for item in self.key_value_pairs])
def compile_time_value(self, denv):
pairs = [(item.key.compile_time_value(denv), item.value.compile_time_value(denv))
for item in self.key_value_pairs]
try:
return dict(pairs)
except Exception, e:
self.compile_time_value_error(e)
def type_dependencies(self, env):
return ()
def infer_type(self, env):
        # TODO: Infer struct constructors.
return dict_type
def analyse_types(self, env):
hold_errors()
self.key_value_pairs = [ item.analyse_types(env)
for item in self.key_value_pairs ]
self.obj_conversion_errors = held_errors()
release_errors(ignore=True)
return self
def may_be_none(self):
return False
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject:
self.release_errors()
if not self.type.subtype_of(dst_type):
error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
elif dst_type.is_struct_or_union:
self.type = dst_type
if not dst_type.is_struct and len(self.key_value_pairs) != 1:
error(self.pos, "Exactly one field must be specified to convert to union '%s'" % dst_type)
elif dst_type.is_struct and len(self.key_value_pairs) < len(dst_type.scope.var_entries):
warning(self.pos, "Not all members given for struct '%s'" % dst_type, 1)
for item in self.key_value_pairs:
if isinstance(item.key, CoerceToPyTypeNode):
item.key = item.key.arg
if not item.key.is_string_literal:
error(item.key.pos, "Invalid struct field identifier")
item.key = StringNode(item.key.pos, value="<error>")
else:
key = str(item.key.value) # converts string literals to unicode in Py3
member = dst_type.scope.lookup_here(key)
if not member:
error(item.key.pos, "struct '%s' has no field '%s'" % (dst_type, key))
else:
value = item.value
if isinstance(value, CoerceToPyTypeNode):
value = value.arg
item.value = value.coerce_to(member.type, env)
else:
self.type = error_type
error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
return self
def release_errors(self):
for err in self.obj_conversion_errors:
report_error(err)
self.obj_conversion_errors = []
gil_message = "Constructing Python dict"
def generate_evaluation_code(self, code):
# Custom method used here because key-value
# pairs are evaluated and used one at a time.
code.mark_pos(self.pos)
self.allocate_temp_result(code)
if self.type.is_pyobject:
self.release_errors()
code.putln(
"%s = PyDict_New(); %s" % (
self.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
for item in self.key_value_pairs:
item.generate_evaluation_code(code)
if self.type.is_pyobject:
if self.exclude_null_values:
code.putln('if (%s) {' % item.value.py_result())
code.put_error_if_neg(self.pos,
"PyDict_SetItem(%s, %s, %s)" % (
self.result(),
item.key.py_result(),
item.value.py_result()))
if self.exclude_null_values:
code.putln('}')
else:
code.putln("%s.%s = %s;" % (
self.result(),
item.key.value,
item.value.result()))
item.generate_disposal_code(code)
item.free_temps(code)
def annotate(self, code):
for item in self.key_value_pairs:
item.annotate(code)
class DictItemNode(ExprNode):
# Represents a single item in a DictNode
#
# key ExprNode
# value ExprNode
subexprs = ['key', 'value']
nogil_check = None # Parent DictNode takes care of it
def calculate_constant_result(self):
self.constant_result = (
self.key.constant_result, self.value.constant_result)
def analyse_types(self, env):
self.key = self.key.analyse_types(env)
self.value = self.value.analyse_types(env)
self.key = self.key.coerce_to_pyobject(env)
self.value = self.value.coerce_to_pyobject(env)
return self
def generate_evaluation_code(self, code):
self.key.generate_evaluation_code(code)
self.value.generate_evaluation_code(code)
def generate_disposal_code(self, code):
self.key.generate_disposal_code(code)
self.value.generate_disposal_code(code)
def free_temps(self, code):
self.key.free_temps(code)
self.value.free_temps(code)
def __iter__(self):
return iter([self.key, self.value])
class SortedDictKeysNode(ExprNode):
# build sorted list of dict keys, e.g. for dir()
subexprs = ['arg']
is_temp = True
def __init__(self, arg):
ExprNode.__init__(self, arg.pos, arg=arg)
self.type = Builtin.list_type
def analyse_types(self, env):
arg = self.arg.analyse_types(env)
if arg.type is Builtin.dict_type:
arg = arg.as_none_safe_node(
"'NoneType' object is not iterable")
self.arg = arg
return self
def may_be_none(self):
return False
def generate_result_code(self, code):
dict_result = self.arg.py_result()
if self.arg.type is Builtin.dict_type:
function = 'PyDict_Keys'
else:
function = 'PyMapping_Keys'
code.putln('%s = %s(%s); %s' % (
self.result(), function, dict_result,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
code.put_error_if_neg(
self.pos, 'PyList_Sort(%s)' % self.py_result())
class ModuleNameMixin(object):
def get_py_mod_name(self, code):
return code.get_py_string_const(
self.module_name, identifier=True)
def get_py_qualified_name(self, code):
return code.get_py_string_const(
self.qualname, identifier=True)
class ClassNode(ExprNode, ModuleNameMixin):
# Helper class used in the implementation of Python
# class definitions. Constructs a class object given
# a name, tuple of bases and class dictionary.
#
# name EncodedString Name of the class
# bases ExprNode Base class tuple
# dict ExprNode Class dict (not owned by this node)
# doc ExprNode or None Doc string
# module_name EncodedString Name of defining module
subexprs = ['bases', 'doc']
def analyse_types(self, env):
self.bases = self.bases.analyse_types(env)
if self.doc:
self.doc = self.doc.analyse_types(env)
self.doc = self.doc.coerce_to_pyobject(env)
self.type = py_object_type
self.is_temp = 1
env.use_utility_code(UtilityCode.load_cached("CreateClass", "ObjectHandling.c"))
return self
def may_be_none(self):
return True
gil_message = "Constructing Python class"
def generate_result_code(self, code):
cname = code.intern_identifier(self.name)
if self.doc:
code.put_error_if_neg(self.pos,
'PyDict_SetItem(%s, %s, %s)' % (
self.dict.py_result(),
code.intern_identifier(
StringEncoding.EncodedString("__doc__")),
self.doc.py_result()))
py_mod_name = self.get_py_mod_name(code)
qualname = self.get_py_qualified_name(code)
code.putln(
'%s = __Pyx_CreateClass(%s, %s, %s, %s, %s); %s' % (
self.result(),
self.bases.py_result(),
self.dict.py_result(),
cname,
qualname,
py_mod_name,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class Py3ClassNode(ExprNode):
# Helper class used in the implementation of Python3+
# class definitions. Constructs a class object given
# a name, tuple of bases and class dictionary.
#
# name EncodedString Name of the class
# dict ExprNode Class dict (not owned by this node)
# module_name EncodedString Name of defining module
# calculate_metaclass bool should call CalculateMetaclass()
# allow_py2_metaclass bool should look for Py2 metaclass
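    # Example: "class C(Base, metaclass=Meta, **kwargs): ..." -- the
    # metaclass and the keyword dict (mkw) feed into
    # __Pyx_Py3ClassCreate() below.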
subexprs = []
def analyse_types(self, env):
self.type = py_object_type
self.is_temp = 1
return self
def may_be_none(self):
return True
gil_message = "Constructing Python class"
def generate_result_code(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("Py3ClassCreate", "ObjectHandling.c"))
cname = code.intern_identifier(self.name)
if self.mkw:
mkw = self.mkw.py_result()
else:
mkw = 'NULL'
if self.metaclass:
metaclass = self.metaclass.result()
else:
metaclass = "((PyObject*)&__Pyx_DefaultClassType)"
code.putln(
'%s = __Pyx_Py3ClassCreate(%s, %s, %s, %s, %s, %d, %d); %s' % (
self.result(),
metaclass,
cname,
self.bases.py_result(),
self.dict.py_result(),
mkw,
self.calculate_metaclass,
self.allow_py2_metaclass,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class KeywordArgsNode(ExprNode):
# Helper class for keyword arguments.
#
# starstar_arg DictNode
# keyword_args [DictItemNode]
subexprs = ['starstar_arg', 'keyword_args']
is_temp = 1
type = dict_type
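    # Example: for a call like "f(a=1, **extra)", starstar_arg is the
    # "extra" mapping and keyword_args holds the explicit "a=1" item;
    # keys occurring in both are rejected at runtime via
    # __Pyx_RaiseDoubleKeywordsError().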
def calculate_constant_result(self):
result = dict(self.starstar_arg.constant_result)
for item in self.keyword_args:
key, value = item.constant_result
if key in result:
raise ValueError("duplicate keyword argument found: %s" % key)
result[key] = value
self.constant_result = result
def compile_time_value(self, denv):
result = self.starstar_arg.compile_time_value(denv)
pairs = [ (item.key.compile_time_value(denv), item.value.compile_time_value(denv))
for item in self.keyword_args ]
try:
result = dict(result)
for key, value in pairs:
if key in result:
raise ValueError("duplicate keyword argument found: %s" % key)
result[key] = value
except Exception, e:
self.compile_time_value_error(e)
return result
def type_dependencies(self, env):
return ()
def infer_type(self, env):
return dict_type
def analyse_types(self, env):
arg = self.starstar_arg.analyse_types(env)
arg = arg.coerce_to_pyobject(env)
self.starstar_arg = arg.as_none_safe_node(
# FIXME: CPython's error message starts with the runtime function name
'argument after ** must be a mapping, not NoneType')
self.keyword_args = [ item.analyse_types(env)
for item in self.keyword_args ]
return self
def may_be_none(self):
return False
gil_message = "Constructing Python dict"
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
self.allocate_temp_result(code)
self.starstar_arg.generate_evaluation_code(code)
if self.starstar_arg.type is not Builtin.dict_type:
# CPython supports calling functions with non-dicts, so do we
code.putln('if (likely(PyDict_Check(%s))) {' %
self.starstar_arg.py_result())
if self.keyword_args:
code.putln(
"%s = PyDict_Copy(%s); %s" % (
self.result(),
self.starstar_arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
else:
code.putln("%s = %s;" % (
self.result(),
self.starstar_arg.py_result()))
code.put_incref(self.result(), py_object_type)
if self.starstar_arg.type is not Builtin.dict_type:
code.putln('} else {')
code.putln(
"%s = PyObject_CallFunctionObjArgs("
"(PyObject*)&PyDict_Type, %s, NULL); %s" % (
self.result(),
self.starstar_arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
code.putln('}')
self.starstar_arg.generate_disposal_code(code)
self.starstar_arg.free_temps(code)
if not self.keyword_args:
return
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseDoubleKeywords", "FunctionArguments.c"))
for item in self.keyword_args:
item.generate_evaluation_code(code)
code.putln("if (unlikely(PyDict_GetItem(%s, %s))) {" % (
self.result(),
item.key.py_result()))
# FIXME: find out function name at runtime!
code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % (
item.key.py_result(),
code.error_goto(self.pos)))
code.putln("}")
code.put_error_if_neg(self.pos,
"PyDict_SetItem(%s, %s, %s)" % (
self.result(),
item.key.py_result(),
item.value.py_result()))
item.generate_disposal_code(code)
item.free_temps(code)
def annotate(self, code):
self.starstar_arg.annotate(code)
for item in self.keyword_args:
item.annotate(code)
class PyClassMetaclassNode(ExprNode):
    # Helper class that holds the Python3 metaclass object
#
# bases ExprNode Base class tuple (not owned by this node)
# mkw ExprNode Class keyword arguments (not owned by this node)
subexprs = []
def analyse_types(self, env):
self.type = py_object_type
self.is_temp = True
return self
def may_be_none(self):
return True
def generate_result_code(self, code):
if self.mkw:
code.globalstate.use_utility_code(
UtilityCode.load_cached("Py3MetaclassGet", "ObjectHandling.c"))
call = "__Pyx_Py3MetaclassGet(%s, %s)" % (
self.bases.result(),
self.mkw.result())
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("CalculateMetaclass", "ObjectHandling.c"))
call = "__Pyx_CalculateMetaclass(NULL, %s)" % (
self.bases.result())
code.putln(
"%s = %s; %s" % (
self.result(), call,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class PyClassNamespaceNode(ExprNode, ModuleNameMixin):
    # Helper class that holds the Python3 namespace object
#
    # All of these are borrowed (not owned by this node), except doc:
# metaclass ExprNode Metaclass object
# bases ExprNode Base class tuple
# mkw ExprNode Class keyword arguments
# doc ExprNode or None Doc string (owned)
subexprs = ['doc']
def analyse_types(self, env):
if self.doc:
self.doc = self.doc.analyse_types(env)
self.doc = self.doc.coerce_to_pyobject(env)
self.type = py_object_type
self.is_temp = 1
return self
def may_be_none(self):
return True
def generate_result_code(self, code):
cname = code.intern_identifier(self.name)
py_mod_name = self.get_py_mod_name(code)
qualname = self.get_py_qualified_name(code)
if self.doc:
doc_code = self.doc.result()
else:
doc_code = '(PyObject *) NULL'
if self.mkw:
mkw = self.mkw.py_result()
else:
mkw = '(PyObject *) NULL'
if self.metaclass:
metaclass = self.metaclass.result()
else:
metaclass = "(PyObject *) NULL"
code.putln(
"%s = __Pyx_Py3MetaclassPrepare(%s, %s, %s, %s, %s, %s, %s); %s" % (
self.result(),
metaclass,
self.bases.result(),
cname,
qualname,
mkw,
py_mod_name,
doc_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class ClassCellInjectorNode(ExprNode):
# Initialize CyFunction.func_classobj
is_temp = True
type = py_object_type
subexprs = []
is_active = False
def analyse_expressions(self, env):
if self.is_active:
env.use_utility_code(
UtilityCode.load_cached("CyFunctionClassCell", "CythonFunction.c"))
return self
def generate_evaluation_code(self, code):
if self.is_active:
self.allocate_temp_result(code)
code.putln(
'%s = PyList_New(0); %s' % (
self.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
def generate_injection_code(self, code, classobj_cname):
if self.is_active:
code.putln('__Pyx_CyFunction_InitClassCell(%s, %s);' % (
self.result(), classobj_cname))
class ClassCellNode(ExprNode):
# Class Cell for noargs super()
subexprs = []
is_temp = True
is_generator = False
type = py_object_type
def analyse_types(self, env):
return self
def generate_result_code(self, code):
if not self.is_generator:
code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % (
self.result(),
Naming.self_cname))
else:
code.putln('%s = %s->classobj;' % (
self.result(), Naming.generator_cname))
code.putln(
'if (!%s) { PyErr_SetString(PyExc_SystemError, '
'"super(): empty __class__ cell"); %s }' % (
self.result(),
code.error_goto(self.pos)))
code.put_incref(self.result(), py_object_type)
class BoundMethodNode(ExprNode):
# Helper class used in the implementation of Python
    # class definitions. Constructs a bound method
# object from a class and a function.
#
# function ExprNode Function object
# self_object ExprNode self object
subexprs = ['function']
def analyse_types(self, env):
self.function = self.function.analyse_types(env)
self.type = py_object_type
self.is_temp = 1
return self
gil_message = "Constructing a bound method"
def generate_result_code(self, code):
code.putln(
"%s = PyMethod_New(%s, %s, (PyObject*)%s->ob_type); %s" % (
self.result(),
self.function.py_result(),
self.self_object.py_result(),
self.self_object.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class UnboundMethodNode(ExprNode):
# Helper class used in the implementation of Python
# class definitions. Constructs an unbound method
# object from a class and a function.
#
# function ExprNode Function object
type = py_object_type
is_temp = 1
subexprs = ['function']
def analyse_types(self, env):
self.function = self.function.analyse_types(env)
return self
def may_be_none(self):
return False
gil_message = "Constructing an unbound method"
def generate_result_code(self, code):
class_cname = code.pyclass_stack[-1].classobj.result()
code.putln(
"%s = PyMethod_New(%s, 0, %s); %s" % (
self.result(),
self.function.py_result(),
class_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class PyCFunctionNode(ExprNode, ModuleNameMixin):
# Helper class used in the implementation of Python
# functions. Constructs a PyCFunction object
# from a PyMethodDef struct.
#
# pymethdef_cname string PyMethodDef structure
# self_object ExprNode or None
# binding bool
# def_node DefNode the Python function node
# module_name EncodedString Name of defining module
# code_object CodeObjectNode the PyCodeObject creator node
subexprs = ['code_object', 'defaults_tuple', 'defaults_kwdict',
'annotations_dict']
self_object = None
code_object = None
binding = False
def_node = None
defaults = None
defaults_struct = None
defaults_pyobjects = 0
defaults_tuple = None
defaults_kwdict = None
annotations_dict = None
type = py_object_type
is_temp = 1
specialized_cpdefs = None
is_specialization = False
@classmethod
def from_defnode(cls, node, binding):
return cls(node.pos,
def_node=node,
pymethdef_cname=node.entry.pymethdef_cname,
binding=binding or node.specialized_cpdefs,
specialized_cpdefs=node.specialized_cpdefs,
code_object=CodeObjectNode(node))
def analyse_types(self, env):
if self.binding:
self.analyse_default_args(env)
return self
def analyse_default_args(self, env):
"""
Handle non-literal function's default arguments.
"""
nonliteral_objects = []
nonliteral_other = []
default_args = []
default_kwargs = []
annotations = []
for arg in self.def_node.args:
if arg.default:
if not arg.default.is_literal:
arg.is_dynamic = True
if arg.type.is_pyobject:
nonliteral_objects.append(arg)
else:
nonliteral_other.append(arg)
else:
arg.default = DefaultLiteralArgNode(arg.pos, arg.default)
if arg.kw_only:
default_kwargs.append(arg)
else:
default_args.append(arg)
if arg.annotation:
arg.annotation = arg.annotation.analyse_types(env)
if not arg.annotation.type.is_pyobject:
arg.annotation = arg.annotation.coerce_to_pyobject(env)
annotations.append((arg.pos, arg.name, arg.annotation))
if self.def_node.return_type_annotation:
annotations.append((self.def_node.return_type_annotation.pos,
StringEncoding.EncodedString("return"),
self.def_node.return_type_annotation))
if nonliteral_objects or nonliteral_other:
module_scope = env.global_scope()
cname = module_scope.next_id(Naming.defaults_struct_prefix)
scope = Symtab.StructOrUnionScope(cname)
self.defaults = []
for arg in nonliteral_objects:
entry = scope.declare_var(arg.name, arg.type, None,
Naming.arg_prefix + arg.name,
allow_pyobject=True)
self.defaults.append((arg, entry))
for arg in nonliteral_other:
entry = scope.declare_var(arg.name, arg.type, None,
Naming.arg_prefix + arg.name,
allow_pyobject=False)
self.defaults.append((arg, entry))
entry = module_scope.declare_struct_or_union(
None, 'struct', scope, 1, None, cname=cname)
self.defaults_struct = scope
self.defaults_pyobjects = len(nonliteral_objects)
for arg, entry in self.defaults:
arg.default_value = '%s->%s' % (
Naming.dynamic_args_cname, entry.cname)
self.def_node.defaults_struct = self.defaults_struct.name
if default_args or default_kwargs:
if self.defaults_struct is None:
if default_args:
defaults_tuple = TupleNode(self.pos, args=[
arg.default for arg in default_args])
self.defaults_tuple = defaults_tuple.analyse_types(env)
if default_kwargs:
defaults_kwdict = DictNode(self.pos, key_value_pairs=[
DictItemNode(
arg.pos,
key=IdentifierStringNode(arg.pos, value=arg.name),
value=arg.default)
for arg in default_kwargs])
self.defaults_kwdict = defaults_kwdict.analyse_types(env)
else:
if default_args:
defaults_tuple = DefaultsTupleNode(
self.pos, default_args, self.defaults_struct)
else:
defaults_tuple = NoneNode(self.pos)
if default_kwargs:
defaults_kwdict = DefaultsKwDictNode(
self.pos, default_kwargs, self.defaults_struct)
else:
defaults_kwdict = NoneNode(self.pos)
defaults_getter = Nodes.DefNode(
self.pos, args=[], star_arg=None, starstar_arg=None,
body=Nodes.ReturnStatNode(
self.pos, return_type=py_object_type,
value=TupleNode(
self.pos, args=[defaults_tuple, defaults_kwdict])),
decorators=None,
name=StringEncoding.EncodedString("__defaults__"))
defaults_getter.analyse_declarations(env)
defaults_getter = defaults_getter.analyse_expressions(env)
defaults_getter.body = defaults_getter.body.analyse_expressions(
defaults_getter.local_scope)
defaults_getter.py_wrapper_required = False
defaults_getter.pymethdef_required = False
self.def_node.defaults_getter = defaults_getter
if annotations:
annotations_dict = DictNode(self.pos, key_value_pairs=[
DictItemNode(
pos, key=IdentifierStringNode(pos, value=name),
value=value)
for pos, name, value in annotations])
self.annotations_dict = annotations_dict.analyse_types(env)
def may_be_none(self):
return False
gil_message = "Constructing Python function"
def self_result_code(self):
if self.self_object is None:
self_result = "NULL"
else:
self_result = self.self_object.py_result()
return self_result
def generate_result_code(self, code):
if self.binding:
self.generate_cyfunction_code(code)
else:
self.generate_pycfunction_code(code)
def generate_pycfunction_code(self, code):
py_mod_name = self.get_py_mod_name(code)
code.putln(
'%s = PyCFunction_NewEx(&%s, %s, %s); %s' % (
self.result(),
self.pymethdef_cname,
self.self_result_code(),
py_mod_name,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
def generate_cyfunction_code(self, code):
if self.specialized_cpdefs:
def_node = self.specialized_cpdefs[0]
else:
def_node = self.def_node
if self.specialized_cpdefs or self.is_specialization:
code.globalstate.use_utility_code(
UtilityCode.load_cached("FusedFunction", "CythonFunction.c"))
constructor = "__pyx_FusedFunction_NewEx"
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("CythonFunction", "CythonFunction.c"))
constructor = "__Pyx_CyFunction_NewEx"
if self.code_object:
code_object_result = self.code_object.py_result()
else:
code_object_result = 'NULL'
flags = []
if def_node.is_staticmethod:
flags.append('__Pyx_CYFUNCTION_STATICMETHOD')
elif def_node.is_classmethod:
flags.append('__Pyx_CYFUNCTION_CLASSMETHOD')
if def_node.local_scope.parent_scope.is_c_class_scope:
flags.append('__Pyx_CYFUNCTION_CCLASS')
if flags:
flags = ' | '.join(flags)
else:
flags = '0'
code.putln(
'%s = %s(&%s, %s, %s, %s, %s, %s, %s); %s' % (
self.result(),
constructor,
self.pymethdef_cname,
flags,
self.get_py_qualified_name(code),
self.self_result_code(),
self.get_py_mod_name(code),
"PyModule_GetDict(%s)" % Naming.module_cname,
code_object_result,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
if def_node.requires_classobj:
assert code.pyclass_stack, "pyclass_stack is empty"
class_node = code.pyclass_stack[-1]
code.put_incref(self.py_result(), py_object_type)
code.putln(
'PyList_Append(%s, %s);' % (
class_node.class_cell.result(),
self.result()))
code.put_giveref(self.py_result())
if self.defaults:
code.putln(
'if (!__Pyx_CyFunction_InitDefaults(%s, sizeof(%s), %d)) %s' % (
self.result(), self.defaults_struct.name,
self.defaults_pyobjects, code.error_goto(self.pos)))
defaults = '__Pyx_CyFunction_Defaults(%s, %s)' % (
self.defaults_struct.name, self.result())
for arg, entry in self.defaults:
arg.generate_assignment_code(code, target='%s->%s' % (
defaults, entry.cname))
if self.defaults_tuple:
code.putln('__Pyx_CyFunction_SetDefaultsTuple(%s, %s);' % (
self.result(), self.defaults_tuple.py_result()))
if self.defaults_kwdict:
code.putln('__Pyx_CyFunction_SetDefaultsKwDict(%s, %s);' % (
self.result(), self.defaults_kwdict.py_result()))
if def_node.defaults_getter:
code.putln('__Pyx_CyFunction_SetDefaultsGetter(%s, %s);' % (
self.result(), def_node.defaults_getter.entry.pyfunc_cname))
if self.annotations_dict:
code.putln('__Pyx_CyFunction_SetAnnotationsDict(%s, %s);' % (
self.result(), self.annotations_dict.py_result()))
class InnerFunctionNode(PyCFunctionNode):
# Special PyCFunctionNode that depends on a closure class
#
binding = True
needs_self_code = True
def self_result_code(self):
if self.needs_self_code:
return "((PyObject*)%s)" % Naming.cur_scope_cname
return "NULL"
class CodeObjectNode(ExprNode):
# Create a PyCodeObject for a CyFunction instance.
#
# def_node DefNode the Python function node
# varnames TupleNode a tuple with all local variable names
subexprs = ['varnames']
is_temp = False
def __init__(self, def_node):
ExprNode.__init__(self, def_node.pos, def_node=def_node)
args = list(def_node.args)
# if we have args/kwargs, then the first two in var_entries are those
local_vars = [arg for arg in def_node.local_scope.var_entries if arg.name]
self.varnames = TupleNode(
def_node.pos,
args=[IdentifierStringNode(arg.pos, value=arg.name)
for arg in args + local_vars],
is_temp=0,
is_literal=1)
def may_be_none(self):
return False
def calculate_result_code(self):
return self.result_code
def generate_result_code(self, code):
self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2)
code = code.get_cached_constants_writer()
code.mark_pos(self.pos)
func = self.def_node
func_name = code.get_py_string_const(
func.name, identifier=True, is_str=False, unicode_value=func.name)
# FIXME: better way to get the module file path at module init time? Encoding to use?
file_path = StringEncoding.BytesLiteral(func.pos[0].get_filenametable_entry().encode('utf8'))
file_path_const = code.get_py_string_const(file_path, identifier=False, is_str=True)
flags = []
if self.def_node.star_arg:
flags.append('CO_VARARGS')
if self.def_node.starstar_arg:
flags.append('CO_VARKEYWORDS')
code.putln("%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, 0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s" % (
self.result_code,
len(func.args) - func.num_kwonly_args, # argcount
func.num_kwonly_args, # kwonlyargcount (Py3 only)
len(self.varnames.args), # nlocals
'|'.join(flags) or '0', # flags
Naming.empty_bytes, # code
Naming.empty_tuple, # consts
Naming.empty_tuple, # names (FIXME)
self.varnames.result(), # varnames
Naming.empty_tuple, # freevars (FIXME)
Naming.empty_tuple, # cellvars (FIXME)
file_path_const, # filename
func_name, # name
self.pos[1], # firstlineno
Naming.empty_bytes, # lnotab
code.error_goto_if_null(self.result_code, self.pos),
))
class DefaultLiteralArgNode(ExprNode):
# CyFunction's literal argument default value
#
# Evaluate literal only once.
subexprs = []
is_literal = True
is_temp = False
def __init__(self, pos, arg):
super(DefaultLiteralArgNode, self).__init__(pos)
self.arg = arg
self.type = self.arg.type
self.evaluated = False
def analyse_types(self, env):
return self
def generate_result_code(self, code):
pass
def generate_evaluation_code(self, code):
if not self.evaluated:
self.arg.generate_evaluation_code(code)
self.evaluated = True
def result(self):
return self.type.cast_code(self.arg.result())
class DefaultNonLiteralArgNode(ExprNode):
# CyFunction's non-literal argument default value
subexprs = []
def __init__(self, pos, arg, defaults_struct):
super(DefaultNonLiteralArgNode, self).__init__(pos)
self.arg = arg
self.defaults_struct = defaults_struct
def analyse_types(self, env):
self.type = self.arg.type
self.is_temp = False
return self
def generate_result_code(self, code):
pass
def result(self):
return '__Pyx_CyFunction_Defaults(%s, %s)->%s' % (
self.defaults_struct.name, Naming.self_cname,
self.defaults_struct.lookup(self.arg.name).cname)
class DefaultsTupleNode(TupleNode):
# CyFunction's __defaults__ tuple
def __init__(self, pos, defaults, defaults_struct):
args = []
for arg in defaults:
if not arg.default.is_literal:
arg = DefaultNonLiteralArgNode(pos, arg, defaults_struct)
else:
arg = arg.default
args.append(arg)
super(DefaultsTupleNode, self).__init__(pos, args=args)
class DefaultsKwDictNode(DictNode):
# CyFunction's __kwdefaults__ dict
def __init__(self, pos, defaults, defaults_struct):
items = []
for arg in defaults:
name = IdentifierStringNode(arg.pos, value=arg.name)
if not arg.default.is_literal:
arg = DefaultNonLiteralArgNode(pos, arg, defaults_struct)
else:
arg = arg.default
items.append(DictItemNode(arg.pos, key=name, value=arg))
super(DefaultsKwDictNode, self).__init__(pos, key_value_pairs=items)
class LambdaNode(InnerFunctionNode):
# Lambda expression node (only used as a function reference)
#
# args [CArgDeclNode] formal arguments
# star_arg PyArgDeclNode or None * argument
# starstar_arg PyArgDeclNode or None ** argument
# lambda_name string a module-globally unique lambda name
# result_expr ExprNode
# def_node DefNode the underlying function 'def' node
child_attrs = ['def_node']
name = StringEncoding.EncodedString('<lambda>')
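    # Example: "f = lambda x: x + 1" is compiled through the implicit
    # def_node, so lambdas share the normal def-function machinery.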
def analyse_declarations(self, env):
self.def_node.no_assignment_synthesis = True
self.def_node.pymethdef_required = True
self.def_node.analyse_declarations(env)
self.def_node.is_cyfunction = True
self.pymethdef_cname = self.def_node.entry.pymethdef_cname
env.add_lambda_def(self.def_node)
def analyse_types(self, env):
self.def_node = self.def_node.analyse_expressions(env)
return super(LambdaNode, self).analyse_types(env)
def generate_result_code(self, code):
self.def_node.generate_execution_code(code)
super(LambdaNode, self).generate_result_code(code)
class GeneratorExpressionNode(LambdaNode):
# A generator expression, e.g. (i for i in range(10))
#
# Result is a generator.
#
# loop ForStatNode the for-loop, containing a YieldExprNode
# def_node DefNode the underlying generator 'def' node
name = StringEncoding.EncodedString('genexpr')
binding = False
def analyse_declarations(self, env):
super(GeneratorExpressionNode, self).analyse_declarations(env)
# No pymethdef required
self.def_node.pymethdef_required = False
self.def_node.py_wrapper_required = False
self.def_node.is_cyfunction = False
# Force genexpr signature
self.def_node.entry.signature = TypeSlots.pyfunction_noargs
def generate_result_code(self, code):
code.putln(
'%s = %s(%s); %s' % (
self.result(),
self.def_node.entry.pyfunc_cname,
self.self_result_code(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class YieldExprNode(ExprNode):
# Yield expression node
#
# arg ExprNode the value to return from the generator
# label_num integer yield label number
    # is_yield_from  boolean   whether this is a YieldFromExprNode delegating to another generator
subexprs = ['arg']
type = py_object_type
label_num = 0
is_yield_from = False
def analyse_types(self, env):
if not self.label_num:
error(self.pos, "'yield' not supported here")
self.is_temp = 1
if self.arg is not None:
self.arg = self.arg.analyse_types(env)
if not self.arg.type.is_pyobject:
self.coerce_yield_argument(env)
return self
def coerce_yield_argument(self, env):
self.arg = self.arg.coerce_to_pyobject(env)
def generate_evaluation_code(self, code):
if self.arg:
self.arg.generate_evaluation_code(code)
self.arg.make_owned_reference(code)
code.putln(
"%s = %s;" % (
Naming.retval_cname,
self.arg.result_as(py_object_type)))
self.arg.generate_post_assignment_code(code)
self.arg.free_temps(code)
else:
code.put_init_to_py_none(Naming.retval_cname, py_object_type)
self.generate_yield_code(code)
def generate_yield_code(self, code):
"""
Generate the code to return the argument in 'Naming.retval_cname'
and to continue at the yield label.
"""
label_num, label_name = code.new_yield_label()
code.use_label(label_name)
saved = []
code.funcstate.closure_temps.reset()
for cname, type, manage_ref in code.funcstate.temps_in_use():
save_cname = code.funcstate.closure_temps.allocate_temp(type)
saved.append((cname, save_cname, type))
if type.is_pyobject:
code.put_xgiveref(cname)
code.putln('%s->%s = %s;' % (Naming.cur_scope_cname, save_cname, cname))
code.put_xgiveref(Naming.retval_cname)
code.put_finish_refcount_context()
code.putln("/* return from generator, yielding value */")
code.putln("%s->resume_label = %d;" % (
Naming.generator_cname, label_num))
code.putln("return %s;" % Naming.retval_cname)
code.put_label(label_name)
for cname, save_cname, type in saved:
code.putln('%s = %s->%s;' % (cname, Naming.cur_scope_cname, save_cname))
if type.is_pyobject:
code.putln('%s->%s = 0;' % (Naming.cur_scope_cname, save_cname))
code.put_xgotref(cname)
code.putln(code.error_goto_if_null(Naming.sent_value_cname, self.pos))
if self.result_is_used:
self.allocate_temp_result(code)
code.put('%s = %s; ' % (self.result(), Naming.sent_value_cname))
code.put_incref(self.result(), py_object_type)
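# Rough shape of the C code that generate_yield_code() emits (a sketch with
# made-up temp names, not literal output):
#
#     __pyx_cur_scope->__pyx_t_saved = __pyx_t_1;  /* spill live temps */
#     __pyx_generator->resume_label = 1;
#     return __pyx_r;                              /* yield the value */
#   __pyx_L1_resume:
#     __pyx_t_1 = __pyx_cur_scope->__pyx_t_saved;  /* restore temps */
#     if (unlikely(!__pyx_sent_value)) ...         /* error check on re-entry */
#
# i.e. every temp that is live across the yield is saved into the closure so
# the C stack frame can be abandoned between resumptions.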
class YieldFromExprNode(YieldExprNode):
# "yield from GEN" expression
is_yield_from = True
def coerce_yield_argument(self, env):
if not self.arg.type.is_string:
# FIXME: support C arrays and C++ iterators?
error(self.pos, "yielding from non-Python object not supported")
self.arg = self.arg.coerce_to_pyobject(env)
def generate_evaluation_code(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("YieldFrom", "Generator.c"))
self.arg.generate_evaluation_code(code)
code.putln("%s = __Pyx_Generator_Yield_From(%s, %s);" % (
Naming.retval_cname,
Naming.generator_cname,
self.arg.result_as(py_object_type)))
self.arg.generate_disposal_code(code)
self.arg.free_temps(code)
code.put_xgotref(Naming.retval_cname)
code.putln("if (likely(%s)) {" % Naming.retval_cname)
self.generate_yield_code(code)
code.putln("} else {")
# either error or sub-generator has normally terminated: return value => node result
if self.result_is_used:
# YieldExprNode has allocated the result temp for us
code.putln("%s = NULL;" % self.result())
code.putln("if (unlikely(__Pyx_PyGen_FetchStopIterationValue(&%s) < 0)) %s" % (
self.result(),
code.error_goto(self.pos)))
code.put_gotref(self.result())
else:
code.putln("PyObject* exc_type = PyErr_Occurred();")
code.putln("if (exc_type) {")
code.putln("if (likely(exc_type == PyExc_StopIteration ||"
" PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();")
code.putln("else %s" % code.error_goto(self.pos))
code.putln("}")
code.putln("}")
class GlobalsExprNode(AtomicExprNode):
type = dict_type
is_temp = 1
def analyse_types(self, env):
env.use_utility_code(Builtin.globals_utility_code)
return self
gil_message = "Constructing globals dict"
def may_be_none(self):
return False
def generate_result_code(self, code):
code.putln('%s = __Pyx_Globals(); %s' % (
self.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
class LocalsDictItemNode(DictItemNode):
def analyse_types(self, env):
self.key = self.key.analyse_types(env)
self.value = self.value.analyse_types(env)
self.key = self.key.coerce_to_pyobject(env)
if self.value.type.can_coerce_to_pyobject(env):
self.value = self.value.coerce_to_pyobject(env)
else:
self.value = None
return self
class FuncLocalsExprNode(DictNode):
def __init__(self, pos, env):
local_vars = sorted([
entry.name for entry in env.entries.values() if entry.name])
items = [LocalsDictItemNode(
pos, key=IdentifierStringNode(pos, value=var),
value=NameNode(pos, name=var, allow_null=True))
for var in local_vars]
DictNode.__init__(self, pos, key_value_pairs=items,
exclude_null_values=True)
def analyse_types(self, env):
node = super(FuncLocalsExprNode, self).analyse_types(env)
node.key_value_pairs = [ i for i in node.key_value_pairs
if i.value is not None ]
return node
class PyClassLocalsExprNode(AtomicExprNode):
def __init__(self, pos, pyclass_dict):
AtomicExprNode.__init__(self, pos)
self.pyclass_dict = pyclass_dict
def analyse_types(self, env):
self.type = self.pyclass_dict.type
self.is_temp = False
return self
def may_be_none(self):
return False
def result(self):
return self.pyclass_dict.result()
def generate_result_code(self, code):
pass
def LocalsExprNode(pos, scope_node, env):
if env.is_module_scope:
return GlobalsExprNode(pos)
if env.is_py_class_scope:
return PyClassLocalsExprNode(pos, scope_node.dict)
return FuncLocalsExprNode(pos, env)
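# Dispatch summary for locals(): at module scope it degenerates to globals(),
# inside a Python class body it is simply the class dictionary being built,
# and inside a function it is a fresh dict snapshot of the local variables
# (with unset C locals excluded via exclude_null_values above).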
#-------------------------------------------------------------------
#
# Unary operator nodes
#
#-------------------------------------------------------------------
compile_time_unary_operators = {
'not': operator.not_,
'~': operator.inv,
'-': operator.neg,
'+': operator.pos,
}
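# These are used when folding compile-time expressions, e.g. (hypothetical
# user code) in Cython's DEF constants:
#
#     DEF MASK = ~(-1 << 8)
#
# which would be folded using operator.inv and operator.neg from this table,
# plus the binary lshift from compile_time_binary_operators further below.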
class UnopNode(ExprNode):
# operator string
# operand ExprNode
#
# Processing during analyse_expressions phase:
#
# analyse_c_operation
# Called when the operand is not a pyobject.
# - Check operand type and coerce if needed.
# - Determine result type and result code fragment.
# - Allocate temporary for result if needed.
subexprs = ['operand']
infix = True
def calculate_constant_result(self):
func = compile_time_unary_operators[self.operator]
self.constant_result = func(self.operand.constant_result)
def compile_time_value(self, denv):
func = compile_time_unary_operators.get(self.operator)
if not func:
error(self.pos,
"Unary '%s' not supported in compile-time expression"
% self.operator)
operand = self.operand.compile_time_value(denv)
try:
return func(operand)
except Exception, e:
self.compile_time_value_error(e)
def infer_type(self, env):
operand_type = self.operand.infer_type(env)
if operand_type.is_cpp_class or operand_type.is_ptr:
cpp_type = operand_type.find_cpp_operation_type(self.operator)
if cpp_type is not None:
return cpp_type
return self.infer_unop_type(env, operand_type)
def infer_unop_type(self, env, operand_type):
if operand_type.is_pyobject:
return py_object_type
else:
return operand_type
def may_be_none(self):
if self.operand.type and self.operand.type.is_builtin_type:
if self.operand.type is not type_type:
return False
return ExprNode.may_be_none(self)
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
if self.is_py_operation():
self.coerce_operand_to_pyobject(env)
self.type = py_object_type
self.is_temp = 1
elif self.is_cpp_operation():
self.analyse_cpp_operation(env)
else:
self.analyse_c_operation(env)
return self
def check_const(self):
return self.operand.check_const()
def is_py_operation(self):
return self.operand.type.is_pyobject
def nogil_check(self, env):
if self.is_py_operation():
self.gil_error()
def is_cpp_operation(self):
type = self.operand.type
return type.is_cpp_class
def coerce_operand_to_pyobject(self, env):
self.operand = self.operand.coerce_to_pyobject(env)
def generate_result_code(self, code):
if self.operand.type.is_pyobject:
self.generate_py_operation_code(code)
def generate_py_operation_code(self, code):
function = self.py_operation_function()
code.putln(
"%s = %s(%s); %s" % (
self.result(),
function,
self.operand.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
def type_error(self):
if not self.operand.type.is_error:
error(self.pos, "Invalid operand type for '%s' (%s)" %
(self.operator, self.operand.type))
self.type = PyrexTypes.error_type
def analyse_cpp_operation(self, env):
cpp_type = self.operand.type.find_cpp_operation_type(self.operator)
if cpp_type is None:
error(self.pos, "'%s' operator not defined for %s" % (
self.operator, type))
self.type_error()
return
self.type = cpp_type
class NotNode(UnopNode):
# 'not' operator
#
# operand ExprNode
operator = '!'
type = PyrexTypes.c_bint_type
def calculate_constant_result(self):
self.constant_result = not self.operand.constant_result
def compile_time_value(self, denv):
operand = self.operand.compile_time_value(denv)
try:
return not operand
except Exception, e:
self.compile_time_value_error(e)
def infer_unop_type(self, env, operand_type):
return PyrexTypes.c_bint_type
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
operand_type = self.operand.type
if operand_type.is_cpp_class:
cpp_type = operand_type.find_cpp_operation_type(self.operator)
if not cpp_type:
error(self.pos, "'!' operator not defined for %s" % operand_type)
self.type = PyrexTypes.error_type
return
self.type = cpp_type
else:
self.operand = self.operand.coerce_to_boolean(env)
return self
def calculate_result_code(self):
return "(!%s)" % self.operand.result()
def generate_result_code(self, code):
pass
class UnaryPlusNode(UnopNode):
# unary '+' operator
operator = '+'
def analyse_c_operation(self, env):
self.type = PyrexTypes.widest_numeric_type(
self.operand.type, PyrexTypes.c_int_type)
def py_operation_function(self):
return "PyNumber_Positive"
def calculate_result_code(self):
if self.is_cpp_operation():
return "(+%s)" % self.operand.result()
else:
return self.operand.result()
class UnaryMinusNode(UnopNode):
# unary '-' operator
operator = '-'
def analyse_c_operation(self, env):
if self.operand.type.is_numeric:
self.type = PyrexTypes.widest_numeric_type(
self.operand.type, PyrexTypes.c_int_type)
elif self.operand.type.is_enum:
self.type = PyrexTypes.c_int_type
else:
self.type_error()
if self.type.is_complex:
self.infix = False
def py_operation_function(self):
return "PyNumber_Negative"
def calculate_result_code(self):
if self.infix:
return "(-%s)" % self.operand.result()
else:
return "%s(%s)" % (self.operand.type.unary_op('-'), self.operand.result())
def get_constant_c_result_code(self):
value = self.operand.get_constant_c_result_code()
if value:
return "(-%s)" % value
class TildeNode(UnopNode):
# unary '~' operator
def analyse_c_operation(self, env):
if self.operand.type.is_int:
self.type = PyrexTypes.widest_numeric_type(
self.operand.type, PyrexTypes.c_int_type)
elif self.operand.type.is_enum:
self.type = PyrexTypes.c_int_type
else:
self.type_error()
def py_operation_function(self):
return "PyNumber_Invert"
def calculate_result_code(self):
return "(~%s)" % self.operand.result()
class CUnopNode(UnopNode):
def is_py_operation(self):
return False
class DereferenceNode(CUnopNode):
# unary * operator
operator = '*'
def infer_unop_type(self, env, operand_type):
if operand_type.is_ptr:
return operand_type.base_type
else:
return PyrexTypes.error_type
def analyse_c_operation(self, env):
if self.operand.type.is_ptr:
self.type = self.operand.type.base_type
else:
self.type_error()
def calculate_result_code(self):
return "(*%s)" % self.operand.result()
class DecrementIncrementNode(CUnopNode):
# unary ++/-- operator
def analyse_c_operation(self, env):
if self.operand.type.is_numeric:
self.type = PyrexTypes.widest_numeric_type(
self.operand.type, PyrexTypes.c_int_type)
elif self.operand.type.is_ptr:
self.type = self.operand.type
else:
self.type_error()
def calculate_result_code(self):
if self.is_prefix:
return "(%s%s)" % (self.operator, self.operand.result())
else:
return "(%s%s)" % (self.operand.result(), self.operator)
def inc_dec_constructor(is_prefix, operator):
return lambda pos, **kwds: DecrementIncrementNode(pos, is_prefix=is_prefix, operator=operator, **kwds)
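# Cython has no ++/-- in the language itself; these nodes back the helpers in
# the cython.operator module (an assumption about typical usage):
#
#     from cython.operator cimport preincrement
#     preincrement(i)    # compiles to (++i)
#
# inc_dec_constructor() is the factory that binds prefix/postfix and the
# operator character for those helpers.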
class AmpersandNode(CUnopNode):
# The C address-of operator.
#
# operand ExprNode
operator = '&'
def infer_unop_type(self, env, operand_type):
return PyrexTypes.c_ptr_type(operand_type)
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
argtype = self.operand.type
if argtype.is_cpp_class:
cpp_type = argtype.find_cpp_operation_type(self.operator)
if cpp_type is not None:
self.type = cpp_type
return self
if not (argtype.is_cfunction or argtype.is_reference or self.operand.is_addressable()):
if argtype.is_memoryviewslice:
self.error("Cannot take address of memoryview slice")
else:
self.error("Taking address of non-lvalue")
return self
if argtype.is_pyobject:
self.error("Cannot take address of Python variable")
return self
self.type = PyrexTypes.c_ptr_type(argtype)
return self
def check_const(self):
return self.operand.check_const_addr()
def error(self, mess):
error(self.pos, mess)
self.type = PyrexTypes.error_type
self.result_code = "<error>"
def calculate_result_code(self):
return "(&%s)" % self.operand.result()
def generate_result_code(self, code):
pass
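# Example (hypothetical variable names): for
#
#     cdef int x
#     cdef int *p = &x
#
# the AmpersandNode yields "(&__pyx_v_x)" with type int*, after the checks
# above have rejected Python objects and non-lvalues.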
unop_node_classes = {
"+": UnaryPlusNode,
"-": UnaryMinusNode,
"~": TildeNode,
}
def unop_node(pos, operator, operand):
    # Construct a unop node of the appropriate class for
    # the given operator.
if isinstance(operand, IntNode) and operator == '-':
return IntNode(pos = operand.pos, value = str(-Utils.str_to_number(operand.value)),
longness=operand.longness, unsigned=operand.unsigned)
elif isinstance(operand, UnopNode) and operand.operator == operator in '+-':
warning(pos, "Python has no increment/decrement operator: %s%sx == %s(%sx) == x" % ((operator,)*4), 5)
return unop_node_classes[operator](pos,
operator = operator,
operand = operand)
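# Constant-folding example, following the IntNode branch above: the source
# expression -5 is parsed as unop('-', IntNode('5')) and collapses directly
# into IntNode('-5'), so no negation is emitted at runtime. A doubled sign
# such as --x or ++x only triggers the "no increment/decrement operator"
# warning and is compiled as two nested unary operations.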
class TypecastNode(ExprNode):
# C type cast
#
# operand ExprNode
# base_type CBaseTypeNode
# declarator CDeclaratorNode
# typecheck boolean
#
    # If used from a transform, one can specify the attribute
    # "type" directly and leave base_type and declarator as None.
subexprs = ['operand']
base_type = declarator = type = None
def type_dependencies(self, env):
return ()
def infer_type(self, env):
if self.type is None:
base_type = self.base_type.analyse(env)
_, self.type = self.declarator.analyse(base_type, env)
return self.type
def analyse_types(self, env):
if self.type is None:
base_type = self.base_type.analyse(env)
_, self.type = self.declarator.analyse(base_type, env)
if self.operand.has_constant_result():
# Must be done after self.type is resolved.
self.calculate_constant_result()
if self.type.is_cfunction:
error(self.pos,
"Cannot cast to a function type")
self.type = PyrexTypes.error_type
self.operand = self.operand.analyse_types(env)
if self.type is PyrexTypes.c_bint_type:
# short circuit this to a coercion
return self.operand.coerce_to_boolean(env)
to_py = self.type.is_pyobject
from_py = self.operand.type.is_pyobject
if from_py and not to_py and self.operand.is_ephemeral():
if not self.type.is_numeric and not self.type.is_cpp_class:
error(self.pos, "Casting temporary Python object to non-numeric non-Python type")
if to_py and not from_py:
if self.type is bytes_type and self.operand.type.is_int:
return CoerceIntToBytesNode(self.operand, env)
elif self.operand.type.can_coerce_to_pyobject(env):
self.result_ctype = py_object_type
base_type = self.base_type.analyse(env)
self.operand = self.operand.coerce_to(base_type, env)
else:
if self.operand.type.is_ptr:
if not (self.operand.type.base_type.is_void or self.operand.type.base_type.is_struct):
error(self.pos, "Python objects cannot be cast from pointers of primitive types")
else:
# Should this be an error?
warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.operand.type, self.type))
self.operand = self.operand.coerce_to_simple(env)
elif from_py and not to_py:
if self.type.create_from_py_utility_code(env):
self.operand = self.operand.coerce_to(self.type, env)
elif self.type.is_ptr:
if not (self.type.base_type.is_void or self.type.base_type.is_struct):
error(self.pos, "Python objects cannot be cast to pointers of primitive types")
else:
warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.type, self.operand.type))
elif from_py and to_py:
if self.typecheck:
self.operand = PyTypeTestNode(self.operand, self.type, env, notnone=True)
elif isinstance(self.operand, SliceIndexNode):
# This cast can influence the created type of string slices.
self.operand = self.operand.coerce_to(self.type, env)
elif self.type.is_complex and self.operand.type.is_complex:
self.operand = self.operand.coerce_to_simple(env)
elif self.operand.type.is_fused:
self.operand = self.operand.coerce_to(self.type, env)
#self.type = self.operand.type
return self
def is_simple(self):
# either temp or a C cast => no side effects other than the operand's
return self.operand.is_simple()
def nonlocally_immutable(self):
return self.is_temp or self.operand.nonlocally_immutable()
def nogil_check(self, env):
if self.type and self.type.is_pyobject and self.is_temp:
self.gil_error()
def check_const(self):
return self.operand.check_const()
def calculate_constant_result(self):
self.constant_result = self.calculate_result_code(self.operand.constant_result)
def calculate_result_code(self, operand_result = None):
if operand_result is None:
operand_result = self.operand.result()
if self.type.is_complex:
operand_result = self.operand.result()
if self.operand.type.is_complex:
real_part = self.type.real_type.cast_code("__Pyx_CREAL(%s)" % operand_result)
imag_part = self.type.real_type.cast_code("__Pyx_CIMAG(%s)" % operand_result)
else:
real_part = self.type.real_type.cast_code(operand_result)
imag_part = "0"
return "%s(%s, %s)" % (
self.type.from_parts,
real_part,
imag_part)
else:
return self.type.cast_code(operand_result)
def get_constant_c_result_code(self):
operand_result = self.operand.get_constant_c_result_code()
if operand_result:
return self.type.cast_code(operand_result)
def result_as(self, type):
if self.type.is_pyobject and not self.is_temp:
# Optimise away some unnecessary casting
return self.operand.result_as(type)
else:
return ExprNode.result_as(self, type)
def generate_result_code(self, code):
if self.is_temp:
code.putln(
"%s = (PyObject *)%s;" % (
self.result(),
self.operand.result()))
code.put_incref(self.result(), self.ctype())
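# Typical results (a sketch): <double>n on a C int n reduces to the plain C
# cast ((double)n) via calculate_result_code(), while a cast that must yield
# an owned Python reference takes the is_temp path above, which reinterprets
# the operand as PyObject* and increfs it.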
ERR_START = "Start may not be given"
ERR_NOT_STOP = "Stop must be provided to indicate shape"
ERR_STEPS = ("Strides may only be given to indicate contiguity. "
"Consider slicing it after conversion")
ERR_NOT_POINTER = "Can only create cython.array from pointer or array"
ERR_BASE_TYPE = "Pointer base type does not match cython.array base type"
class CythonArrayNode(ExprNode):
"""
Used when a pointer of base_type is cast to a memoryviewslice with that
base type. i.e.
<int[:M:1, :N]> p
creates a fortran-contiguous cython.array.
We leave the type set to object so coercions to object are more efficient
and less work. Acquiring a memoryviewslice from this will be just as
efficient. ExprNode.coerce_to() will do the additional typecheck on
self.compile_time_type
This also handles <int[:, :]> my_c_array
operand ExprNode the thing we're casting
base_type_node MemoryViewSliceTypeNode the cast expression node
"""
subexprs = ['operand', 'shapes']
shapes = None
is_temp = True
mode = "c"
array_dtype = None
shape_type = PyrexTypes.c_py_ssize_t_type
def analyse_types(self, env):
import MemoryView
self.operand = self.operand.analyse_types(env)
if self.array_dtype:
array_dtype = self.array_dtype
else:
array_dtype = self.base_type_node.base_type_node.analyse(env)
axes = self.base_type_node.axes
MemoryView.validate_memslice_dtype(self.pos, array_dtype)
self.type = error_type
self.shapes = []
ndim = len(axes)
# Base type of the pointer or C array we are converting
base_type = self.operand.type
if not self.operand.type.is_ptr and not self.operand.type.is_array:
error(self.operand.pos, ERR_NOT_POINTER)
return self
# Dimension sizes of C array
array_dimension_sizes = []
if base_type.is_array:
while base_type.is_array:
array_dimension_sizes.append(base_type.size)
base_type = base_type.base_type
elif base_type.is_ptr:
base_type = base_type.base_type
else:
error(self.pos, "unexpected base type %s found" % base_type)
return self
if not (base_type.same_as(array_dtype) or base_type.is_void):
error(self.operand.pos, ERR_BASE_TYPE)
return self
elif self.operand.type.is_array and len(array_dimension_sizes) != ndim:
error(self.operand.pos,
"Expected %d dimensions, array has %d dimensions" %
(ndim, len(array_dimension_sizes)))
return self
# Verify the start, stop and step values
# In case of a C array, use the size of C array in each dimension to
# get an automatic cast
for axis_no, axis in enumerate(axes):
if not axis.start.is_none:
error(axis.start.pos, ERR_START)
return self
if axis.stop.is_none:
if array_dimension_sizes:
dimsize = array_dimension_sizes[axis_no]
axis.stop = IntNode(self.pos, value=str(dimsize),
constant_result=dimsize,
type=PyrexTypes.c_int_type)
else:
error(axis.pos, ERR_NOT_STOP)
return self
axis.stop = axis.stop.analyse_types(env)
shape = axis.stop.coerce_to(self.shape_type, env)
if not shape.is_literal:
shape.coerce_to_temp(env)
self.shapes.append(shape)
first_or_last = axis_no in (0, ndim - 1)
if not axis.step.is_none and first_or_last:
# '1' in the first or last dimension denotes F or C contiguity
axis.step = axis.step.analyse_types(env)
if (not axis.step.type.is_int and axis.step.is_literal and not
axis.step.type.is_error):
error(axis.step.pos, "Expected an integer literal")
return self
if axis.step.compile_time_value(env) != 1:
error(axis.step.pos, ERR_STEPS)
return self
if axis_no == 0:
self.mode = "fortran"
elif not axis.step.is_none and not first_or_last:
# step provided in some other dimension
error(axis.step.pos, ERR_STEPS)
return self
if not self.operand.is_name:
self.operand = self.operand.coerce_to_temp(env)
axes = [('direct', 'follow')] * len(axes)
if self.mode == "fortran":
axes[0] = ('direct', 'contig')
else:
axes[-1] = ('direct', 'contig')
self.coercion_type = PyrexTypes.MemoryViewSliceType(array_dtype, axes)
self.type = self.get_cython_array_type(env)
MemoryView.use_cython_array_utility_code(env)
env.use_utility_code(MemoryView.typeinfo_to_format_code)
return self
def allocate_temp_result(self, code):
if self.temp_code:
raise RuntimeError("temp allocated mulitple times")
self.temp_code = code.funcstate.allocate_temp(self.type, True)
def infer_type(self, env):
return self.get_cython_array_type(env)
def get_cython_array_type(self, env):
return env.global_scope().context.cython_scope.viewscope.lookup("array").type
def generate_result_code(self, code):
import Buffer
shapes = [self.shape_type.cast_code(shape.result())
for shape in self.shapes]
dtype = self.coercion_type.dtype
shapes_temp = code.funcstate.allocate_temp(py_object_type, True)
format_temp = code.funcstate.allocate_temp(py_object_type, True)
itemsize = "sizeof(%s)" % dtype.declaration_code("")
type_info = Buffer.get_type_information_cname(code, dtype)
if self.operand.type.is_ptr:
code.putln("if (!%s) {" % self.operand.result())
code.putln( 'PyErr_SetString(PyExc_ValueError,'
'"Cannot create cython.array from NULL pointer");')
code.putln(code.error_goto(self.operand.pos))
code.putln("}")
code.putln("%s = __pyx_format_from_typeinfo(&%s);" %
(format_temp, type_info))
buildvalue_fmt = " __PYX_BUILD_PY_SSIZE_T " * len(shapes)
code.putln('%s = Py_BuildValue((char*) "(" %s ")", %s);' % (
shapes_temp, buildvalue_fmt, ", ".join(shapes)))
err = "!%s || !%s || !PyBytes_AsString(%s)" % (format_temp,
shapes_temp,
format_temp)
code.putln(code.error_goto_if(err, self.pos))
code.put_gotref(format_temp)
code.put_gotref(shapes_temp)
tup = (self.result(), shapes_temp, itemsize, format_temp,
self.mode, self.operand.result())
code.putln('%s = __pyx_array_new('
'%s, %s, PyBytes_AS_STRING(%s), '
'(char *) "%s", (char *) %s);' % tup)
code.putln(code.error_goto_if_null(self.result(), self.pos))
code.put_gotref(self.result())
def dispose(temp):
code.put_decref_clear(temp, py_object_type)
code.funcstate.release_temp(temp)
dispose(shapes_temp)
dispose(format_temp)
@classmethod
def from_carray(cls, src_node, env):
"""
Given a C array type, return a CythonArrayNode
"""
pos = src_node.pos
base_type = src_node.type
none_node = NoneNode(pos)
axes = []
while base_type.is_array:
axes.append(SliceNode(pos, start=none_node, stop=none_node,
step=none_node))
base_type = base_type.base_type
axes[-1].step = IntNode(pos, value="1", is_c_literal=True)
memslicenode = Nodes.MemoryViewSliceTypeNode(pos, axes=axes,
base_type_node=base_type)
result = CythonArrayNode(pos, base_type_node=memslicenode,
operand=src_node, array_dtype=base_type)
result = result.analyse_types(env)
return result
class SizeofNode(ExprNode):
# Abstract base class for sizeof(x) expression nodes.
type = PyrexTypes.c_size_t_type
def check_const(self):
return True
def generate_result_code(self, code):
pass
class SizeofTypeNode(SizeofNode):
# C sizeof function applied to a type
#
# base_type CBaseTypeNode
# declarator CDeclaratorNode
subexprs = []
arg_type = None
def analyse_types(self, env):
# we may have incorrectly interpreted a dotted name as a type rather than an attribute
# this could be better handled by more uniformly treating types as runtime-available objects
if 0 and self.base_type.module_path:
path = self.base_type.module_path
obj = env.lookup(path[0])
if obj.as_module is None:
operand = NameNode(pos=self.pos, name=path[0])
for attr in path[1:]:
operand = AttributeNode(pos=self.pos, obj=operand, attribute=attr)
operand = AttributeNode(pos=self.pos, obj=operand, attribute=self.base_type.name)
self.operand = operand
self.__class__ = SizeofVarNode
node = self.analyse_types(env)
return node
if self.arg_type is None:
base_type = self.base_type.analyse(env)
_, arg_type = self.declarator.analyse(base_type, env)
self.arg_type = arg_type
self.check_type()
return self
def check_type(self):
arg_type = self.arg_type
if arg_type.is_pyobject and not arg_type.is_extension_type:
error(self.pos, "Cannot take sizeof Python object")
elif arg_type.is_void:
error(self.pos, "Cannot take sizeof void")
elif not arg_type.is_complete():
error(self.pos, "Cannot take sizeof incomplete type '%s'" % arg_type)
def calculate_result_code(self):
if self.arg_type.is_extension_type:
# the size of the pointer is boring
# we want the size of the actual struct
arg_code = self.arg_type.declaration_code("", deref=1)
else:
arg_code = self.arg_type.declaration_code("")
return "(sizeof(%s))" % arg_code
class SizeofVarNode(SizeofNode):
# C sizeof function applied to a variable
#
# operand ExprNode
subexprs = ['operand']
def analyse_types(self, env):
# We may actually be looking at a type rather than a variable...
# If we are, traditional analysis would fail...
operand_as_type = self.operand.analyse_as_type(env)
if operand_as_type:
self.arg_type = operand_as_type
if self.arg_type.is_fused:
self.arg_type = self.arg_type.specialize(env.fused_to_specific)
self.__class__ = SizeofTypeNode
self.check_type()
else:
self.operand = self.operand.analyse_types(env)
return self
def calculate_result_code(self):
return "(sizeof(%s))" % self.operand.result()
def generate_result_code(self, code):
pass
class TypeofNode(ExprNode):
# Compile-time type of an expression, as a string.
#
# operand ExprNode
# literal StringNode # internal
literal = None
type = py_object_type
subexprs = ['literal'] # 'operand' will be ignored after type analysis!
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
value = StringEncoding.EncodedString(str(self.operand.type)) #self.operand.type.typeof_name())
literal = StringNode(self.pos, value=value)
literal = literal.analyse_types(env)
self.literal = literal.coerce_to_pyobject(env)
return self
def may_be_none(self):
return False
def generate_evaluation_code(self, code):
self.literal.generate_evaluation_code(code)
def calculate_result_code(self):
return self.literal.calculate_result_code()
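# Usage example (user-level Cython, assuming the cython.typeof builtin):
#
#     cdef int i = 0
#     print(cython.typeof(i))   # prints "int"; the string is fixed at
#                               # compile time
#
# The operand is analysed only for its type and then discarded; the node
# degenerates to the pre-computed string literal, as the subexprs list notes.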
#-------------------------------------------------------------------
#
# Binary operator nodes
#
#-------------------------------------------------------------------
compile_time_binary_operators = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
'is': operator.is_,
'is_not': operator.is_not,
'+': operator.add,
'&': operator.and_,
'/': operator.truediv,
'//': operator.floordiv,
'<<': operator.lshift,
'%': operator.mod,
'*': operator.mul,
'|': operator.or_,
'**': operator.pow,
'>>': operator.rshift,
'-': operator.sub,
'^': operator.xor,
'in': lambda x, seq: x in seq,
'not_in': lambda x, seq: x not in seq,
}
def get_compile_time_binop(node):
func = compile_time_binary_operators.get(node.operator)
if not func:
error(node.pos,
"Binary '%s' not supported in compile-time expression"
% node.operator)
return func
class BinopNode(ExprNode):
# operator string
# operand1 ExprNode
# operand2 ExprNode
#
# Processing during analyse_expressions phase:
#
# analyse_c_operation
# Called when neither operand is a pyobject.
# - Check operand types and coerce if needed.
# - Determine result type and result code fragment.
# - Allocate temporary for result if needed.
subexprs = ['operand1', 'operand2']
inplace = False
def calculate_constant_result(self):
func = compile_time_binary_operators[self.operator]
self.constant_result = func(
self.operand1.constant_result,
self.operand2.constant_result)
def compile_time_value(self, denv):
func = get_compile_time_binop(self)
operand1 = self.operand1.compile_time_value(denv)
operand2 = self.operand2.compile_time_value(denv)
try:
return func(operand1, operand2)
except Exception, e:
self.compile_time_value_error(e)
def infer_type(self, env):
return self.result_type(self.operand1.infer_type(env),
self.operand2.infer_type(env))
def analyse_types(self, env):
self.operand1 = self.operand1.analyse_types(env)
self.operand2 = self.operand2.analyse_types(env)
self.analyse_operation(env)
return self
def analyse_operation(self, env):
if self.is_py_operation():
self.coerce_operands_to_pyobjects(env)
self.type = self.result_type(self.operand1.type,
self.operand2.type)
assert self.type.is_pyobject
self.is_temp = 1
elif self.is_cpp_operation():
self.analyse_cpp_operation(env)
else:
self.analyse_c_operation(env)
def is_py_operation(self):
return self.is_py_operation_types(self.operand1.type, self.operand2.type)
def is_py_operation_types(self, type1, type2):
return type1.is_pyobject or type2.is_pyobject
def is_cpp_operation(self):
return (self.operand1.type.is_cpp_class
or self.operand2.type.is_cpp_class)
def analyse_cpp_operation(self, env):
entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
if not entry:
self.type_error()
return
func_type = entry.type
if func_type.is_ptr:
func_type = func_type.base_type
if len(func_type.args) == 1:
self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
else:
self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
self.type = func_type.return_type
def result_type(self, type1, type2):
if self.is_py_operation_types(type1, type2):
if type2.is_string:
type2 = Builtin.bytes_type
elif type2.is_pyunicode_ptr:
type2 = Builtin.unicode_type
if type1.is_string:
type1 = Builtin.bytes_type
elif type1.is_pyunicode_ptr:
type1 = Builtin.unicode_type
if type1.is_builtin_type or type2.is_builtin_type:
if type1 is type2 and self.operator in '**%+|&^':
# FIXME: at least these operators should be safe - others?
return type1
result_type = self.infer_builtin_types_operation(type1, type2)
if result_type is not None:
return result_type
return py_object_type
else:
return self.compute_c_result_type(type1, type2)
def infer_builtin_types_operation(self, type1, type2):
return None
def nogil_check(self, env):
if self.is_py_operation():
self.gil_error()
def coerce_operands_to_pyobjects(self, env):
self.operand1 = self.operand1.coerce_to_pyobject(env)
self.operand2 = self.operand2.coerce_to_pyobject(env)
def check_const(self):
return self.operand1.check_const() and self.operand2.check_const()
def generate_result_code(self, code):
#print "BinopNode.generate_result_code:", self.operand1, self.operand2 ###
if self.operand1.type.is_pyobject:
function = self.py_operation_function()
if self.operator == '**':
extra_args = ", Py_None"
else:
extra_args = ""
code.putln(
"%s = %s(%s, %s%s); %s" % (
self.result(),
function,
self.operand1.py_result(),
self.operand2.py_result(),
extra_args,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif self.is_temp:
code.putln("%s = %s;" % (self.result(), self.calculate_result_code()))
def type_error(self):
if not (self.operand1.type.is_error
or self.operand2.type.is_error):
error(self.pos, "Invalid operand types for '%s' (%s; %s)" %
(self.operator, self.operand1.type,
self.operand2.type))
self.type = PyrexTypes.error_type
class CBinopNode(BinopNode):
def analyse_types(self, env):
node = BinopNode.analyse_types(self, env)
if node.is_py_operation():
node.type = PyrexTypes.error_type
return node
def py_operation_function(self):
return ""
def calculate_result_code(self):
return "(%s %s %s)" % (
self.operand1.result(),
self.operator,
self.operand2.result())
def compute_c_result_type(self, type1, type2):
cpp_type = None
if type1.is_cpp_class or type1.is_ptr:
cpp_type = type1.find_cpp_operation_type(self.operator, type2)
# FIXME: handle the reversed case?
#if cpp_type is None and (type2.is_cpp_class or type2.is_ptr):
# cpp_type = type2.find_cpp_operation_type(self.operator, type1)
# FIXME: do we need to handle other cases here?
return cpp_type
def c_binop_constructor(operator):
def make_binop_node(pos, **operands):
return CBinopNode(pos, operator=operator, **operands)
return make_binop_node
class NumBinopNode(BinopNode):
# Binary operation taking numeric arguments.
infix = True
overflow_check = False
overflow_bit_node = None
def analyse_c_operation(self, env):
type1 = self.operand1.type
type2 = self.operand2.type
self.type = self.compute_c_result_type(type1, type2)
if not self.type:
self.type_error()
return
if self.type.is_complex:
self.infix = False
if (self.type.is_int
and env.directives['overflowcheck']
and self.operator in self.overflow_op_names):
if (self.operator in ('+', '*')
and self.operand1.has_constant_result()
and not self.operand2.has_constant_result()):
self.operand1, self.operand2 = self.operand2, self.operand1
self.overflow_check = True
self.overflow_fold = env.directives['overflowcheck.fold']
self.func = self.type.overflow_check_binop(
self.overflow_op_names[self.operator],
env,
const_rhs = self.operand2.has_constant_result())
self.is_temp = True
if not self.infix or (type1.is_numeric and type2.is_numeric):
self.operand1 = self.operand1.coerce_to(self.type, env)
self.operand2 = self.operand2.coerce_to(self.type, env)
def compute_c_result_type(self, type1, type2):
if self.c_types_okay(type1, type2):
widest_type = PyrexTypes.widest_numeric_type(type1, type2)
if widest_type is PyrexTypes.c_bint_type:
if self.operator not in '|^&':
# False + False == 0 # not False!
widest_type = PyrexTypes.c_int_type
else:
widest_type = PyrexTypes.widest_numeric_type(
widest_type, PyrexTypes.c_int_type)
return widest_type
else:
return None
def may_be_none(self):
if self.type and self.type.is_builtin_type:
# if we know the result type, we know the operation, so it can't be None
return False
type1 = self.operand1.type
type2 = self.operand2.type
if type1 and type1.is_builtin_type and type2 and type2.is_builtin_type:
# XXX: I can't think of any case where a binary operation
# on builtin types evaluates to None - add a special case
# here if there is one.
return False
return super(NumBinopNode, self).may_be_none()
def get_constant_c_result_code(self):
value1 = self.operand1.get_constant_c_result_code()
value2 = self.operand2.get_constant_c_result_code()
if value1 and value2:
return "(%s %s %s)" % (value1, self.operator, value2)
else:
return None
def c_types_okay(self, type1, type2):
#print "NumBinopNode.c_types_okay:", type1, type2 ###
return (type1.is_numeric or type1.is_enum) \
and (type2.is_numeric or type2.is_enum)
def generate_evaluation_code(self, code):
if self.overflow_check:
self.overflow_bit_node = self
self.overflow_bit = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
code.putln("%s = 0;" % self.overflow_bit)
super(NumBinopNode, self).generate_evaluation_code(code)
if self.overflow_check:
code.putln("if (unlikely(%s)) {" % self.overflow_bit)
code.putln('PyErr_SetString(PyExc_OverflowError, "value too large");')
code.putln(code.error_goto(self.pos))
code.putln("}")
code.funcstate.release_temp(self.overflow_bit)
def calculate_result_code(self):
if self.overflow_bit_node is not None:
return "%s(%s, %s, &%s)" % (
self.func,
self.operand1.result(),
self.operand2.result(),
self.overflow_bit_node.overflow_bit)
elif self.infix:
return "(%s %s %s)" % (
self.operand1.result(),
self.operator,
self.operand2.result())
else:
func = self.type.binary_op(self.operator)
if func is None:
error(self.pos, "binary operator %s not supported for %s" % (self.operator, self.type))
return "%s(%s, %s)" % (
func,
self.operand1.result(),
self.operand2.result())
def is_py_operation_types(self, type1, type2):
return (type1.is_unicode_char or
type2.is_unicode_char or
BinopNode.is_py_operation_types(self, type1, type2))
def py_operation_function(self):
function_name = self.py_functions[self.operator]
if self.inplace:
function_name = function_name.replace('PyNumber_', 'PyNumber_InPlace')
return function_name
py_functions = {
"|": "PyNumber_Or",
"^": "PyNumber_Xor",
"&": "PyNumber_And",
"<<": "PyNumber_Lshift",
">>": "PyNumber_Rshift",
"+": "PyNumber_Add",
"-": "PyNumber_Subtract",
"*": "PyNumber_Multiply",
"/": "__Pyx_PyNumber_Divide",
"//": "PyNumber_FloorDivide",
"%": "PyNumber_Remainder",
"**": "PyNumber_Power"
}
overflow_op_names = {
"+": "add",
"-": "sub",
"*": "mul",
"<<": "lshift",
}
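# Sketch of what the overflow-checked path generates (helper name is
# illustrative): with the overflowcheck directive enabled, a C int addition
#
#     c = a + b
#
# becomes roughly
#
#     int __pyx_ovf = 0;
#     c = __Pyx_add_int_checked(a, b, &__pyx_ovf);
#     if (unlikely(__pyx_ovf)) { PyErr_SetString(PyExc_OverflowError, ...); goto error; }
#
# matching generate_evaluation_code() and calculate_result_code() above.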
class IntBinopNode(NumBinopNode):
# Binary operation taking integer arguments.
def c_types_okay(self, type1, type2):
#print "IntBinopNode.c_types_okay:", type1, type2 ###
return (type1.is_int or type1.is_enum) \
and (type2.is_int or type2.is_enum)
class AddNode(NumBinopNode):
# '+' operator.
def is_py_operation_types(self, type1, type2):
if type1.is_string and type2.is_string or type1.is_pyunicode_ptr and type2.is_pyunicode_ptr:
return 1
else:
return NumBinopNode.is_py_operation_types(self, type1, type2)
def infer_builtin_types_operation(self, type1, type2):
# b'abc' + 'abc' raises an exception in Py3,
# so we can safely infer the Py2 type for bytes here
string_types = [bytes_type, str_type, basestring_type, unicode_type] # Py2.4 lacks tuple.index()
if type1 in string_types and type2 in string_types:
return string_types[max(string_types.index(type1),
string_types.index(type2))]
return None
def compute_c_result_type(self, type1, type2):
#print "AddNode.compute_c_result_type:", type1, self.operator, type2 ###
if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
return type1
elif (type2.is_ptr or type2.is_array) and (type1.is_int or type1.is_enum):
return type2
else:
return NumBinopNode.compute_c_result_type(
self, type1, type2)
def py_operation_function(self):
type1, type2 = self.operand1.type, self.operand2.type
if type1 is unicode_type or type2 is unicode_type:
if type1.is_builtin_type and type2.is_builtin_type:
if self.operand1.may_be_none() or self.operand2.may_be_none():
return '__Pyx_PyUnicode_ConcatSafe'
else:
return '__Pyx_PyUnicode_Concat'
return super(AddNode, self).py_operation_function()
class SubNode(NumBinopNode):
# '-' operator.
def compute_c_result_type(self, type1, type2):
if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
return type1
elif (type1.is_ptr or type1.is_array) and (type2.is_ptr or type2.is_array):
return PyrexTypes.c_ptrdiff_t_type
else:
return NumBinopNode.compute_c_result_type(
self, type1, type2)
class MulNode(NumBinopNode):
# '*' operator.
def is_py_operation_types(self, type1, type2):
if ((type1.is_string and type2.is_int) or
(type2.is_string and type1.is_int)):
return 1
else:
return NumBinopNode.is_py_operation_types(self, type1, type2)
def infer_builtin_types_operation(self, type1, type2):
# let's assume that whatever builtin type you multiply a string with
# will either return a string of the same type or fail with an exception
string_types = (bytes_type, str_type, basestring_type, unicode_type)
if type1 in string_types and type2.is_builtin_type:
return type1
if type2 in string_types and type1.is_builtin_type:
return type2
# multiplication of containers/numbers with an integer value
# always (?) returns the same type
if type1.is_int:
return type2
if type2.is_int:
return type1
return None
class DivNode(NumBinopNode):
# '/' or '//' operator.
cdivision = None
truedivision = None # == "unknown" if operator == '/'
ctruedivision = False
cdivision_warnings = False
zerodivision_check = None
def find_compile_time_binary_operator(self, op1, op2):
func = compile_time_binary_operators[self.operator]
if self.operator == '/' and self.truedivision is None:
# => true div for floats, floor div for integers
if isinstance(op1, (int,long)) and isinstance(op2, (int,long)):
func = compile_time_binary_operators['//']
return func
def calculate_constant_result(self):
op1 = self.operand1.constant_result
op2 = self.operand2.constant_result
func = self.find_compile_time_binary_operator(op1, op2)
self.constant_result = func(
self.operand1.constant_result,
self.operand2.constant_result)
def compile_time_value(self, denv):
operand1 = self.operand1.compile_time_value(denv)
operand2 = self.operand2.compile_time_value(denv)
try:
func = self.find_compile_time_binary_operator(
operand1, operand2)
return func(operand1, operand2)
except Exception, e:
self.compile_time_value_error(e)
def analyse_operation(self, env):
if self.cdivision or env.directives['cdivision']:
self.ctruedivision = False
else:
self.ctruedivision = self.truedivision
NumBinopNode.analyse_operation(self, env)
if self.is_cpp_operation():
self.cdivision = True
if not self.type.is_pyobject:
self.zerodivision_check = (
self.cdivision is None and not env.directives['cdivision']
and (not self.operand2.has_constant_result() or
self.operand2.constant_result == 0))
if self.zerodivision_check or env.directives['cdivision_warnings']:
# Need to check ahead of time to warn or raise zero division error
self.operand1 = self.operand1.coerce_to_simple(env)
self.operand2 = self.operand2.coerce_to_simple(env)
def compute_c_result_type(self, type1, type2):
if self.operator == '/' and self.ctruedivision:
if not type1.is_float and not type2.is_float:
widest_type = PyrexTypes.widest_numeric_type(type1, PyrexTypes.c_double_type)
widest_type = PyrexTypes.widest_numeric_type(type2, widest_type)
return widest_type
return NumBinopNode.compute_c_result_type(self, type1, type2)
def zero_division_message(self):
if self.type.is_int:
return "integer division or modulo by zero"
else:
return "float division"
def generate_evaluation_code(self, code):
if not self.type.is_pyobject and not self.type.is_complex:
if self.cdivision is None:
self.cdivision = (code.globalstate.directives['cdivision']
or not self.type.signed
or self.type.is_float)
if not self.cdivision:
code.globalstate.use_utility_code(div_int_utility_code.specialize(self.type))
NumBinopNode.generate_evaluation_code(self, code)
self.generate_div_warning_code(code)
def generate_div_warning_code(self, code):
if not self.type.is_pyobject:
if self.zerodivision_check:
if not self.infix:
zero_test = "%s(%s)" % (self.type.unary_op('zero'), self.operand2.result())
else:
zero_test = "%s == 0" % self.operand2.result()
code.putln("if (unlikely(%s)) {" % zero_test)
code.put_ensure_gil()
code.putln('PyErr_SetString(PyExc_ZeroDivisionError, "%s");' % self.zero_division_message())
code.put_release_ensured_gil()
code.putln(code.error_goto(self.pos))
code.putln("}")
if self.type.is_int and self.type.signed and self.operator != '%':
code.globalstate.use_utility_code(division_overflow_test_code)
if self.operand2.type.signed == 2:
# explicitly signed, no runtime check needed
minus1_check = 'unlikely(%s == -1)' % self.operand2.result()
else:
type_of_op2 = self.operand2.type.declaration_code('')
minus1_check = '(!(((%s)-1) > 0)) && unlikely(%s == (%s)-1)' % (
type_of_op2, self.operand2.result(), type_of_op2)
code.putln("else if (sizeof(%s) == sizeof(long) && %s "
" && unlikely(UNARY_NEG_WOULD_OVERFLOW(%s))) {" % (
self.type.declaration_code(''),
minus1_check,
self.operand1.result()))
code.put_ensure_gil()
code.putln('PyErr_SetString(PyExc_OverflowError, "value too large to perform division");')
code.put_release_ensured_gil()
code.putln(code.error_goto(self.pos))
code.putln("}")
if code.globalstate.directives['cdivision_warnings'] and self.operator != '/':
code.globalstate.use_utility_code(cdivision_warning_utility_code)
code.putln("if (unlikely((%s < 0) ^ (%s < 0))) {" % (
self.operand1.result(),
self.operand2.result()))
code.put_ensure_gil()
code.putln(code.set_error_info(self.pos, used=True))
code.putln("if (__Pyx_cdivision_warning(%(FILENAME)s, "
"%(LINENO)s)) {" % {
'FILENAME': Naming.filename_cname,
'LINENO': Naming.lineno_cname,
})
code.put_release_ensured_gil()
code.put_goto(code.error_label)
code.putln("}")
code.put_release_ensured_gil()
code.putln("}")
def calculate_result_code(self):
if self.type.is_complex:
return NumBinopNode.calculate_result_code(self)
elif self.type.is_float and self.operator == '//':
return "floor(%s / %s)" % (
self.operand1.result(),
self.operand2.result())
elif self.truedivision or self.cdivision:
op1 = self.operand1.result()
op2 = self.operand2.result()
if self.truedivision:
if self.type != self.operand1.type:
op1 = self.type.cast_code(op1)
if self.type != self.operand2.type:
op2 = self.type.cast_code(op2)
return "(%s / %s)" % (op1, op2)
else:
return "__Pyx_div_%s(%s, %s)" % (
self.type.specialization_name(),
self.operand1.result(),
self.operand2.result())
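# Why the helper exists: C division truncates toward zero while Python
# floors, so for example -7 / 2 is -3 in C but -7 // 2 is -4 in Python. With
# cdivision disabled, the generated __Pyx_div_<type>() utility performs the
# sign correction, while the zero-division and overflow checks are emitted
# separately in generate_div_warning_code() above.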
class ModNode(DivNode):
# '%' operator.
def is_py_operation_types(self, type1, type2):
return (type1.is_string
or type2.is_string
or NumBinopNode.is_py_operation_types(self, type1, type2))
def infer_builtin_types_operation(self, type1, type2):
# b'%s' % xyz raises an exception in Py3, so it's safe to infer the type for Py2
if type1 is unicode_type:
            # None % xyz may be implemented by the RHS
if type2.is_builtin_type or not self.operand1.may_be_none():
return type1
elif type1 in (bytes_type, str_type, basestring_type):
if type2 is unicode_type:
return type2
elif type2.is_numeric:
return type1
elif type1 is bytes_type and not type2.is_builtin_type:
                return None  # RHS might implement the '%' operator differently in Py3
else:
return basestring_type # either str or unicode, can't tell
return None
def zero_division_message(self):
if self.type.is_int:
return "integer division or modulo by zero"
else:
return "float divmod()"
def analyse_operation(self, env):
DivNode.analyse_operation(self, env)
if not self.type.is_pyobject:
if self.cdivision is None:
self.cdivision = env.directives['cdivision'] or not self.type.signed
if not self.cdivision and not self.type.is_int and not self.type.is_float:
error(self.pos, "mod operator not supported for type '%s'" % self.type)
def generate_evaluation_code(self, code):
if not self.type.is_pyobject and not self.cdivision:
if self.type.is_int:
code.globalstate.use_utility_code(
mod_int_utility_code.specialize(self.type))
else: # float
code.globalstate.use_utility_code(
mod_float_utility_code.specialize(
self.type, math_h_modifier=self.type.math_h_modifier))
# note: skipping over DivNode here
NumBinopNode.generate_evaluation_code(self, code)
self.generate_div_warning_code(code)
def calculate_result_code(self):
if self.cdivision:
if self.type.is_float:
return "fmod%s(%s, %s)" % (
self.type.math_h_modifier,
self.operand1.result(),
self.operand2.result())
else:
return "(%s %% %s)" % (
self.operand1.result(),
self.operand2.result())
else:
return "__Pyx_mod_%s(%s, %s)" % (
self.type.specialization_name(),
self.operand1.result(),
self.operand2.result())
def py_operation_function(self):
if self.operand1.type is unicode_type:
if self.operand1.may_be_none():
return '__Pyx_PyUnicode_FormatSafe'
else:
return 'PyUnicode_Format'
elif self.operand1.type is str_type:
if self.operand1.may_be_none():
return '__Pyx_PyString_FormatSafe'
else:
return '__Pyx_PyString_Format'
return super(ModNode, self).py_operation_function()
class PowNode(NumBinopNode):
# '**' operator.
def analyse_c_operation(self, env):
NumBinopNode.analyse_c_operation(self, env)
if self.type.is_complex:
if self.type.real_type.is_float:
self.operand1 = self.operand1.coerce_to(self.type, env)
self.operand2 = self.operand2.coerce_to(self.type, env)
self.pow_func = "__Pyx_c_pow" + self.type.real_type.math_h_modifier
else:
error(self.pos, "complex int powers not supported")
self.pow_func = "<error>"
elif self.type.is_float:
self.pow_func = "pow" + self.type.math_h_modifier
elif self.type.is_int:
self.pow_func = "__Pyx_pow_%s" % self.type.declaration_code('').replace(' ', '_')
env.use_utility_code(
int_pow_utility_code.specialize(
func_name=self.pow_func,
type=self.type.declaration_code(''),
signed=self.type.signed and 1 or 0))
elif not self.type.is_error:
error(self.pos, "got unexpected types for C power operator: %s, %s" %
(self.operand1.type, self.operand2.type))
def calculate_result_code(self):
# Work around MSVC overloading ambiguity.
def typecast(operand):
if self.type == operand.type:
return operand.result()
else:
return self.type.cast_code(operand.result())
return "%s(%s, %s)" % (
self.pow_func,
typecast(self.operand1),
typecast(self.operand2))
# Note: This class is temporarily "shut down" into an ineffective temp
# allocation mode.
#
# More sophisticated temp reuse was going on before, one could have a
# look at adding this again after /all/ classes are converted to the
# new temp scheme. (The temp juggling cannot work otherwise).
class BoolBinopNode(ExprNode):
# Short-circuiting boolean operation.
#
# operator string
# operand1 ExprNode
# operand2 ExprNode
subexprs = ['operand1', 'operand2']
def infer_type(self, env):
type1 = self.operand1.infer_type(env)
type2 = self.operand2.infer_type(env)
return PyrexTypes.independent_spanning_type(type1, type2)
def may_be_none(self):
if self.operator == 'or':
return self.operand2.may_be_none()
else:
return self.operand1.may_be_none() or self.operand2.may_be_none()
def calculate_constant_result(self):
if self.operator == 'and':
self.constant_result = \
self.operand1.constant_result and \
self.operand2.constant_result
else:
self.constant_result = \
self.operand1.constant_result or \
self.operand2.constant_result
def compile_time_value(self, denv):
if self.operator == 'and':
return self.operand1.compile_time_value(denv) \
and self.operand2.compile_time_value(denv)
else:
return self.operand1.compile_time_value(denv) \
or self.operand2.compile_time_value(denv)
def coerce_to_boolean(self, env):
return BoolBinopNode(
self.pos,
operator = self.operator,
operand1 = self.operand1.coerce_to_boolean(env),
operand2 = self.operand2.coerce_to_boolean(env),
type = PyrexTypes.c_bint_type,
is_temp = self.is_temp)
def analyse_types(self, env):
self.operand1 = self.operand1.analyse_types(env)
self.operand2 = self.operand2.analyse_types(env)
self.type = PyrexTypes.independent_spanning_type(self.operand1.type, self.operand2.type)
self.operand1 = self.operand1.coerce_to(self.type, env)
self.operand2 = self.operand2.coerce_to(self.type, env)
        # For the branch assignment below, it's vital that
        # both operands be simple (temp or trivially re-evaluable) nodes.
self.operand1 = self.operand1.coerce_to_simple(env)
self.operand2 = self.operand2.coerce_to_simple(env)
self.is_temp = 1
return self
gil_message = "Truth-testing Python object"
def check_const(self):
return self.operand1.check_const() and self.operand2.check_const()
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
self.operand1.generate_evaluation_code(code)
test_result, uses_temp = self.generate_operand1_test(code)
if self.operator == 'and':
sense = ""
else:
sense = "!"
code.putln(
"if (%s%s) {" % (
sense,
test_result))
if uses_temp:
code.funcstate.release_temp(test_result)
self.operand1.generate_disposal_code(code)
self.operand2.generate_evaluation_code(code)
self.allocate_temp_result(code)
self.operand2.make_owned_reference(code)
code.putln("%s = %s;" % (self.result(), self.operand2.result()))
self.operand2.generate_post_assignment_code(code)
self.operand2.free_temps(code)
code.putln("} else {")
self.operand1.make_owned_reference(code)
code.putln("%s = %s;" % (self.result(), self.operand1.result()))
self.operand1.generate_post_assignment_code(code)
self.operand1.free_temps(code)
code.putln("}")
def generate_operand1_test(self, code):
# Generate code to test the truth of the first operand.
if self.type.is_pyobject:
test_result = code.funcstate.allocate_temp(PyrexTypes.c_bint_type,
manage_ref=False)
code.putln(
"%s = __Pyx_PyObject_IsTrue(%s); %s" % (
test_result,
self.operand1.py_result(),
code.error_goto_if_neg(test_result, self.pos)))
else:
test_result = self.operand1.result()
return (test_result, self.type.is_pyobject)
class CondExprNode(ExprNode):
# Short-circuiting conditional expression.
#
# test ExprNode
# true_val ExprNode
# false_val ExprNode
true_val = None
false_val = None
subexprs = ['test', 'true_val', 'false_val']
def type_dependencies(self, env):
return self.true_val.type_dependencies(env) + self.false_val.type_dependencies(env)
def infer_type(self, env):
return PyrexTypes.independent_spanning_type(
self.true_val.infer_type(env),
self.false_val.infer_type(env))
def calculate_constant_result(self):
if self.test.constant_result:
self.constant_result = self.true_val.constant_result
else:
self.constant_result = self.false_val.constant_result
def analyse_types(self, env):
self.test = self.test.analyse_types(env).coerce_to_boolean(env)
self.true_val = self.true_val.analyse_types(env)
self.false_val = self.false_val.analyse_types(env)
self.is_temp = 1
return self.analyse_result_type(env)
def analyse_result_type(self, env):
self.type = PyrexTypes.independent_spanning_type(
self.true_val.type, self.false_val.type)
if self.type.is_pyobject:
self.result_ctype = py_object_type
if self.true_val.type.is_pyobject or self.false_val.type.is_pyobject:
self.true_val = self.true_val.coerce_to(self.type, env)
self.false_val = self.false_val.coerce_to(self.type, env)
if self.type == PyrexTypes.error_type:
self.type_error()
return self
def coerce_to(self, dst_type, env):
self.true_val = self.true_val.coerce_to(dst_type, env)
self.false_val = self.false_val.coerce_to(dst_type, env)
self.result_ctype = None
return self.analyse_result_type(env)
def type_error(self):
if not (self.true_val.type.is_error or self.false_val.type.is_error):
error(self.pos, "Incompatible types in conditional expression (%s; %s)" %
(self.true_val.type, self.false_val.type))
self.type = PyrexTypes.error_type
def check_const(self):
return (self.test.check_const()
and self.true_val.check_const()
and self.false_val.check_const())
def generate_evaluation_code(self, code):
# Because subexprs may not be evaluated we can use a more optimal
# subexpr allocation strategy than the default, so override evaluation_code.
code.mark_pos(self.pos)
self.allocate_temp_result(code)
self.test.generate_evaluation_code(code)
code.putln("if (%s) {" % self.test.result() )
self.eval_and_get(code, self.true_val)
code.putln("} else {")
self.eval_and_get(code, self.false_val)
code.putln("}")
self.test.generate_disposal_code(code)
self.test.free_temps(code)
def eval_and_get(self, code, expr):
expr.generate_evaluation_code(code)
expr.make_owned_reference(code)
code.putln('%s = %s;' % (self.result(), expr.result_as(self.ctype())))
expr.generate_post_assignment_code(code)
expr.free_temps(code)
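# Illustrative sketch (assumed shape of the generated C): 'a if t else b'
# allocates the result temp up front and then emits roughly
#
#     /* evaluate t */
#     if (t_result) { /* evaluate a */  result = a; }
#     else          { /* evaluate b */  result = b; }
#
# so only the branch actually taken is evaluated, matching Python's lazy
# conditional-expression semantics.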
richcmp_constants = {
"<" : "Py_LT",
"<=": "Py_LE",
"==": "Py_EQ",
"!=": "Py_NE",
"<>": "Py_NE",
">" : "Py_GT",
">=": "Py_GE",
# the following are faked by special compare functions
"in" : "Py_EQ",
"not_in": "Py_NE",
}
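# The Py_LT..Py_GE values above are CPython's rich-comparison constants as
# passed to PyObject_RichCompare(); 'in'/'not_in' have no such constant, so
# they borrow Py_EQ/Py_NE and are dispatched to the special contains helpers
# instead (see find_special_bool_compare_function below).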
class CmpNode(object):
# Mixin class containing code common to PrimaryCmpNodes
# and CascadedCmpNodes.
special_bool_cmp_function = None
special_bool_cmp_utility_code = None
def infer_type(self, env):
# TODO: Actually implement this (after merging with -unstable).
return py_object_type
def calculate_cascaded_constant_result(self, operand1_result):
func = compile_time_binary_operators[self.operator]
operand2_result = self.operand2.constant_result
if (isinstance(operand1_result, (bytes, unicode)) and
isinstance(operand2_result, (bytes, unicode)) and
type(operand1_result) != type(operand2_result)):
# string comparison of different types isn't portable
return
if self.operator in ('in', 'not_in'):
if isinstance(self.operand2, (ListNode, TupleNode, SetNode)):
if not self.operand2.args:
self.constant_result = self.operator == 'not_in'
return
elif isinstance(self.operand2, ListNode) and not self.cascade:
# tuples are more efficient to store than lists
self.operand2 = self.operand2.as_tuple()
elif isinstance(self.operand2, DictNode):
if not self.operand2.key_value_pairs:
self.constant_result = self.operator == 'not_in'
return
self.constant_result = func(operand1_result, operand2_result)
def cascaded_compile_time_value(self, operand1, denv):
func = get_compile_time_binop(self)
operand2 = self.operand2.compile_time_value(denv)
try:
result = func(operand1, operand2)
except Exception, e:
self.compile_time_value_error(e)
result = None
if result:
cascade = self.cascade
if cascade:
result = result and cascade.cascaded_compile_time_value(operand2, denv)
return result
def is_cpp_comparison(self):
return self.operand1.type.is_cpp_class or self.operand2.type.is_cpp_class
def find_common_int_type(self, env, op, operand1, operand2):
# type1 != type2 and at least one of the types is not a C int
type1 = operand1.type
type2 = operand2.type
type1_can_be_int = False
type2_can_be_int = False
if operand1.is_string_literal and operand1.can_coerce_to_char_literal():
type1_can_be_int = True
if operand2.is_string_literal and operand2.can_coerce_to_char_literal():
type2_can_be_int = True
if type1.is_int:
if type2_can_be_int:
return type1
elif type2.is_int:
if type1_can_be_int:
return type2
elif type1_can_be_int:
if type2_can_be_int:
if Builtin.unicode_type in (type1, type2):
return PyrexTypes.c_py_ucs4_type
else:
return PyrexTypes.c_uchar_type
return None
def find_common_type(self, env, op, operand1, common_type=None):
operand2 = self.operand2
type1 = operand1.type
type2 = operand2.type
new_common_type = None
# catch general errors
if type1 == str_type and (type2.is_string or type2 in (bytes_type, unicode_type)) or \
type2 == str_type and (type1.is_string or type1 in (bytes_type, unicode_type)):
error(self.pos, "Comparisons between bytes/unicode and str are not portable to Python 3")
new_common_type = error_type
# try to use numeric comparisons where possible
elif type1.is_complex or type2.is_complex:
if op not in ('==', '!=') \
and (type1.is_complex or type1.is_numeric) \
and (type2.is_complex or type2.is_numeric):
error(self.pos, "complex types are unordered")
new_common_type = error_type
elif type1.is_pyobject:
new_common_type = type1
elif type2.is_pyobject:
new_common_type = type2
else:
new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
elif type1.is_numeric and type2.is_numeric:
new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
elif common_type is None or not common_type.is_pyobject:
new_common_type = self.find_common_int_type(env, op, operand1, operand2)
if new_common_type is None:
# fall back to generic type compatibility tests
if type1 == type2:
new_common_type = type1
elif type1.is_pyobject or type2.is_pyobject:
if type2.is_numeric or type2.is_string:
if operand2.check_for_coercion_error(type1, env):
new_common_type = error_type
else:
new_common_type = py_object_type
elif type1.is_numeric or type1.is_string:
if operand1.check_for_coercion_error(type2, env):
new_common_type = error_type
else:
new_common_type = py_object_type
elif py_object_type.assignable_from(type1) and py_object_type.assignable_from(type2):
new_common_type = py_object_type
else:
# one Python type and one non-Python type, not assignable
self.invalid_types_error(operand1, op, operand2)
new_common_type = error_type
elif type1.assignable_from(type2):
new_common_type = type1
elif type2.assignable_from(type1):
new_common_type = type2
else:
# C types that we couldn't handle up to here are an error
self.invalid_types_error(operand1, op, operand2)
new_common_type = error_type
if new_common_type.is_string and (isinstance(operand1, BytesNode) or
isinstance(operand2, BytesNode)):
# special case when comparing char* to bytes literal: must
# compare string values!
new_common_type = bytes_type
# recursively merge types
if common_type is None or new_common_type.is_error:
common_type = new_common_type
else:
# we could do a lot better by splitting the comparison
# into a non-Python part and a Python part, but this is
# safer for now
common_type = PyrexTypes.spanning_type(common_type, new_common_type)
if self.cascade:
common_type = self.cascade.find_common_type(env, self.operator, operand2, common_type)
return common_type
def invalid_types_error(self, operand1, op, operand2):
error(self.pos, "Invalid types for '%s' (%s, %s)" %
(op, operand1.type, operand2.type))
def is_python_comparison(self):
return (not self.is_ptr_contains()
and not self.is_c_string_contains()
and (self.has_python_operands()
or (self.cascade and self.cascade.is_python_comparison())
or self.operator in ('in', 'not_in')))
def coerce_operands_to(self, dst_type, env):
operand2 = self.operand2
if operand2.type != dst_type:
self.operand2 = operand2.coerce_to(dst_type, env)
if self.cascade:
self.cascade.coerce_operands_to(dst_type, env)
def is_python_result(self):
return ((self.has_python_operands() and
self.special_bool_cmp_function is None and
self.operator not in ('is', 'is_not', 'in', 'not_in') and
not self.is_c_string_contains() and
not self.is_ptr_contains())
or (self.cascade and self.cascade.is_python_result()))
def is_c_string_contains(self):
return self.operator in ('in', 'not_in') and \
((self.operand1.type.is_int
and (self.operand2.type.is_string or self.operand2.type is bytes_type)) or
(self.operand1.type.is_unicode_char
and self.operand2.type is unicode_type))
def is_ptr_contains(self):
if self.operator in ('in', 'not_in'):
container_type = self.operand2.type
return (container_type.is_ptr or container_type.is_array) \
and not container_type.is_string
def find_special_bool_compare_function(self, env, operand1, result_is_bool=False):
# note: currently operand1 must get coerced to a Python object if we succeed here!
if self.operator in ('==', '!='):
type1, type2 = operand1.type, self.operand2.type
if result_is_bool or (type1.is_builtin_type and type2.is_builtin_type):
if type1 is Builtin.unicode_type or type2 is Builtin.unicode_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
return True
elif type1 is Builtin.bytes_type or type2 is Builtin.bytes_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("BytesEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyBytes_Equals"
return True
elif type1 is Builtin.basestring_type or type2 is Builtin.basestring_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
return True
elif type1 is Builtin.str_type or type2 is Builtin.str_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("StrEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyString_Equals"
return True
elif self.operator in ('in', 'not_in'):
if self.operand2.type is Builtin.dict_type:
self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyDictContains", "ObjectHandling.c")
self.special_bool_cmp_function = "__Pyx_PyDict_Contains"
return True
elif self.operand2.type is Builtin.unicode_type:
self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyUnicodeContains", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyUnicode_Contains"
return True
else:
if not self.operand2.type.is_pyobject:
self.operand2 = self.operand2.coerce_to_pyobject(env)
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySequenceContains", "ObjectHandling.c")
self.special_bool_cmp_function = "__Pyx_PySequence_Contains"
return True
return False
def generate_operation_code(self, code, result_code,
            operand1, op, operand2):
if self.type.is_pyobject:
error_clause = code.error_goto_if_null
got_ref = "__Pyx_XGOTREF(%s); " % result_code
if self.special_bool_cmp_function:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyBoolOrNullFromLong", "ObjectHandling.c"))
coerce_result = "__Pyx_PyBoolOrNull_FromLong"
else:
coerce_result = "__Pyx_PyBool_FromLong"
else:
error_clause = code.error_goto_if_neg
got_ref = ""
coerce_result = ""
if self.special_bool_cmp_function:
if operand1.type.is_pyobject:
result1 = operand1.py_result()
else:
result1 = operand1.result()
if operand2.type.is_pyobject:
result2 = operand2.py_result()
else:
result2 = operand2.result()
if self.special_bool_cmp_utility_code:
code.globalstate.use_utility_code(self.special_bool_cmp_utility_code)
code.putln(
"%s = %s(%s(%s, %s, %s)); %s%s" % (
result_code,
coerce_result,
self.special_bool_cmp_function,
result1, result2, richcmp_constants[op],
got_ref,
error_clause(result_code, self.pos)))
elif operand1.type.is_pyobject and op not in ('is', 'is_not'):
assert op not in ('in', 'not_in'), op
code.putln("%s = PyObject_RichCompare(%s, %s, %s); %s%s" % (
result_code,
operand1.py_result(),
operand2.py_result(),
richcmp_constants[op],
got_ref,
error_clause(result_code, self.pos)))
elif operand1.type.is_complex:
code.putln("%s = %s(%s%s(%s, %s));" % (
result_code,
coerce_result,
op == "!=" and "!" or "",
operand1.type.unary_op('eq'),
operand1.result(),
operand2.result()))
else:
type1 = operand1.type
type2 = operand2.type
if (type1.is_extension_type or type2.is_extension_type) \
and not type1.same_as(type2):
common_type = py_object_type
elif type1.is_numeric:
common_type = PyrexTypes.widest_numeric_type(type1, type2)
else:
common_type = type1
code1 = operand1.result_as(common_type)
code2 = operand2.result_as(common_type)
code.putln("%s = %s(%s %s %s);" % (
result_code,
coerce_result,
code1,
self.c_operator(op),
code2))
def c_operator(self, op):
if op == 'is':
return "=="
elif op == 'is_not':
return "!="
else:
return op
class PrimaryCmpNode(ExprNode, CmpNode):
# Non-cascaded comparison or first comparison of
# a cascaded sequence.
#
# operator string
# operand1 ExprNode
# operand2 ExprNode
# cascade CascadedCmpNode
# We don't use the subexprs mechanism, because
# things here are too complicated for it to handle.
# Instead, we override all the framework methods
# which use it.
child_attrs = ['operand1', 'operand2', 'coerced_operand2', 'cascade']
cascade = None
coerced_operand2 = None
is_memslice_nonecheck = False
def infer_type(self, env):
# TODO: Actually implement this (after merging with -unstable).
return py_object_type
def type_dependencies(self, env):
return ()
def calculate_constant_result(self):
assert not self.cascade
self.calculate_cascaded_constant_result(self.operand1.constant_result)
def compile_time_value(self, denv):
operand1 = self.operand1.compile_time_value(denv)
return self.cascaded_compile_time_value(operand1, denv)
def analyse_types(self, env):
self.operand1 = self.operand1.analyse_types(env)
self.operand2 = self.operand2.analyse_types(env)
if self.is_cpp_comparison():
self.analyse_cpp_comparison(env)
if self.cascade:
error(self.pos, "Cascading comparison not yet supported for cpp types.")
return self
if self.analyse_memoryviewslice_comparison(env):
return self
if self.cascade:
self.cascade = self.cascade.analyse_types(env)
if self.operator in ('in', 'not_in'):
if self.is_c_string_contains():
self.is_pycmp = False
common_type = None
if self.cascade:
error(self.pos, "Cascading comparison not yet supported for 'int_val in string'.")
return self
if self.operand2.type is unicode_type:
env.use_utility_code(UtilityCode.load_cached("PyUCS4InUnicode", "StringTools.c"))
else:
if self.operand1.type is PyrexTypes.c_uchar_type:
self.operand1 = self.operand1.coerce_to(PyrexTypes.c_char_type, env)
if self.operand2.type is not bytes_type:
self.operand2 = self.operand2.coerce_to(bytes_type, env)
env.use_utility_code(UtilityCode.load_cached("BytesContains", "StringTools.c"))
self.operand2 = self.operand2.as_none_safe_node(
"argument of type 'NoneType' is not iterable")
elif self.is_ptr_contains():
if self.cascade:
error(self.pos, "Cascading comparison not supported for 'val in sliced pointer'.")
self.type = PyrexTypes.c_bint_type
# Will be transformed by IterationTransform
return self
elif self.find_special_bool_compare_function(env, self.operand1):
if not self.operand1.type.is_pyobject:
self.operand1 = self.operand1.coerce_to_pyobject(env)
common_type = None # if coercion needed, the method call above has already done it
self.is_pycmp = False # result is bint
else:
common_type = py_object_type
self.is_pycmp = True
elif self.find_special_bool_compare_function(env, self.operand1):
if not self.operand1.type.is_pyobject:
self.operand1 = self.operand1.coerce_to_pyobject(env)
common_type = None # if coercion needed, the method call above has already done it
self.is_pycmp = False # result is bint
else:
common_type = self.find_common_type(env, self.operator, self.operand1)
self.is_pycmp = common_type.is_pyobject
if common_type is not None and not common_type.is_error:
if self.operand1.type != common_type:
self.operand1 = self.operand1.coerce_to(common_type, env)
self.coerce_operands_to(common_type, env)
if self.cascade:
self.operand2 = self.operand2.coerce_to_simple(env)
self.cascade.coerce_cascaded_operands_to_temp(env)
operand2 = self.cascade.optimise_comparison(self.operand2, env)
if operand2 is not self.operand2:
self.coerced_operand2 = operand2
if self.is_python_result():
self.type = PyrexTypes.py_object_type
else:
self.type = PyrexTypes.c_bint_type
cdr = self.cascade
while cdr:
cdr.type = self.type
cdr = cdr.cascade
if self.is_pycmp or self.cascade or self.special_bool_cmp_function:
# 1) owned reference, 2) reused value, 3) potential function error return value
self.is_temp = 1
return self
def analyse_cpp_comparison(self, env):
type1 = self.operand1.type
type2 = self.operand2.type
entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
if entry is None:
error(self.pos, "Invalid types for '%s' (%s, %s)" %
(self.operator, type1, type2))
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return
func_type = entry.type
if func_type.is_ptr:
func_type = func_type.base_type
if len(func_type.args) == 1:
self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
else:
self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
self.is_pycmp = False
self.type = func_type.return_type
def analyse_memoryviewslice_comparison(self, env):
have_none = self.operand1.is_none or self.operand2.is_none
have_slice = (self.operand1.type.is_memoryviewslice or
self.operand2.type.is_memoryviewslice)
ops = ('==', '!=', 'is', 'is_not')
if have_slice and have_none and self.operator in ops:
self.is_pycmp = False
self.type = PyrexTypes.c_bint_type
self.is_memslice_nonecheck = True
return True
return False
def coerce_to_boolean(self, env):
if self.is_pycmp:
# coercing to bool => may allow for more efficient comparison code
if self.find_special_bool_compare_function(
env, self.operand1, result_is_bool=True):
self.is_pycmp = False
self.type = PyrexTypes.c_bint_type
self.is_temp = 1
if self.cascade:
operand2 = self.cascade.optimise_comparison(
self.operand2, env, result_is_bool=True)
if operand2 is not self.operand2:
self.coerced_operand2 = operand2
return self
# TODO: check if we can optimise parts of the cascade here
return ExprNode.coerce_to_boolean(self, env)
def has_python_operands(self):
return (self.operand1.type.is_pyobject
or self.operand2.type.is_pyobject)
def check_const(self):
if self.cascade:
self.not_const()
return False
else:
return self.operand1.check_const() and self.operand2.check_const()
def calculate_result_code(self):
if self.operand1.type.is_complex:
if self.operator == "!=":
negation = "!"
else:
negation = ""
return "(%s%s(%s, %s))" % (
negation,
self.operand1.type.binary_op('=='),
self.operand1.result(),
self.operand2.result())
elif self.is_c_string_contains():
if self.operand2.type is unicode_type:
method = "__Pyx_UnicodeContainsUCS4"
else:
method = "__Pyx_BytesContains"
if self.operator == "not_in":
negation = "!"
else:
negation = ""
return "(%s%s(%s, %s))" % (
negation,
method,
self.operand2.result(),
self.operand1.result())
else:
result1 = self.operand1.result()
result2 = self.operand2.result()
if self.is_memslice_nonecheck:
if self.operand1.type.is_memoryviewslice:
result1 = "((PyObject *) %s.memview)" % result1
else:
result2 = "((PyObject *) %s.memview)" % result2
return "(%s %s %s)" % (
result1,
self.c_operator(self.operator),
result2)
def generate_evaluation_code(self, code):
self.operand1.generate_evaluation_code(code)
self.operand2.generate_evaluation_code(code)
if self.is_temp:
self.allocate_temp_result(code)
self.generate_operation_code(code, self.result(),
self.operand1, self.operator, self.operand2)
if self.cascade:
self.cascade.generate_evaluation_code(
code, self.result(), self.coerced_operand2 or self.operand2,
needs_evaluation=self.coerced_operand2 is not None)
self.operand1.generate_disposal_code(code)
self.operand1.free_temps(code)
self.operand2.generate_disposal_code(code)
self.operand2.free_temps(code)
def generate_subexpr_disposal_code(self, code):
# If this is called, it is a non-cascaded cmp,
# so only need to dispose of the two main operands.
self.operand1.generate_disposal_code(code)
self.operand2.generate_disposal_code(code)
def free_subexpr_temps(self, code):
# If this is called, it is a non-cascaded cmp,
# so only need to dispose of the two main operands.
self.operand1.free_temps(code)
self.operand2.free_temps(code)
def annotate(self, code):
self.operand1.annotate(code)
self.operand2.annotate(code)
if self.cascade:
self.cascade.annotate(code)
class CascadedCmpNode(Node, CmpNode):
# A CascadedCmpNode is not a complete expression node. It
# hangs off the side of another comparison node, shares
# its left operand with that node, and shares its result
# with the PrimaryCmpNode at the head of the chain.
#
# operator string
# operand2 ExprNode
# cascade CascadedCmpNode
child_attrs = ['operand2', 'coerced_operand2', 'cascade']
cascade = None
coerced_operand2 = None
constant_result = constant_value_not_set # FIXME: where to calculate this?
def infer_type(self, env):
# TODO: Actually implement this (after merging with -unstable).
return py_object_type
def type_dependencies(self, env):
return ()
def has_constant_result(self):
return self.constant_result is not constant_value_not_set and \
self.constant_result is not not_a_constant
def analyse_types(self, env):
self.operand2 = self.operand2.analyse_types(env)
if self.cascade:
self.cascade = self.cascade.analyse_types(env)
return self
def has_python_operands(self):
return self.operand2.type.is_pyobject
def optimise_comparison(self, operand1, env, result_is_bool=False):
if self.find_special_bool_compare_function(env, operand1, result_is_bool):
self.is_pycmp = False
self.type = PyrexTypes.c_bint_type
if not operand1.type.is_pyobject:
operand1 = operand1.coerce_to_pyobject(env)
if self.cascade:
operand2 = self.cascade.optimise_comparison(self.operand2, env, result_is_bool)
if operand2 is not self.operand2:
self.coerced_operand2 = operand2
return operand1
def coerce_operands_to_pyobjects(self, env):
self.operand2 = self.operand2.coerce_to_pyobject(env)
if self.operand2.type is dict_type and self.operator in ('in', 'not_in'):
self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
if self.cascade:
self.cascade.coerce_operands_to_pyobjects(env)
def coerce_cascaded_operands_to_temp(self, env):
if self.cascade:
#self.operand2 = self.operand2.coerce_to_temp(env) #CTT
self.operand2 = self.operand2.coerce_to_simple(env)
self.cascade.coerce_cascaded_operands_to_temp(env)
def generate_evaluation_code(self, code, result, operand1, needs_evaluation=False):
if self.type.is_pyobject:
code.putln("if (__Pyx_PyObject_IsTrue(%s)) {" % result)
code.put_decref(result, self.type)
else:
code.putln("if (%s) {" % result)
if needs_evaluation:
operand1.generate_evaluation_code(code)
self.operand2.generate_evaluation_code(code)
self.generate_operation_code(code, result,
operand1, self.operator, self.operand2)
if self.cascade:
self.cascade.generate_evaluation_code(
code, result, self.coerced_operand2 or self.operand2,
needs_evaluation=self.coerced_operand2 is not None)
if needs_evaluation:
operand1.generate_disposal_code(code)
operand1.free_temps(code)
# Cascaded cmp result is always temp
self.operand2.generate_disposal_code(code)
self.operand2.free_temps(code)
code.putln("}")
def annotate(self, code):
self.operand2.annotate(code)
if self.cascade:
self.cascade.annotate(code)
binop_node_classes = {
"or": BoolBinopNode,
"and": BoolBinopNode,
"|": IntBinopNode,
"^": IntBinopNode,
"&": IntBinopNode,
"<<": IntBinopNode,
">>": IntBinopNode,
"+": AddNode,
"-": SubNode,
"*": MulNode,
"/": DivNode,
"//": DivNode,
"%": ModNode,
"**": PowNode
}
def binop_node(pos, operator, operand1, operand2, inplace=False):
# Construct binop node of appropriate class for
# given operator.
return binop_node_classes[operator](pos,
operator = operator,
operand1 = operand1,
operand2 = operand2,
inplace = inplace)
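# Example (hypothetical nodes, for illustration only): the parser lowers
# 'a + b' into binop_node(pos, '+', a_node, b_node), which instantiates
# AddNode(pos, operator='+', operand1=a_node, operand2=b_node, inplace=False);
# augmented assignments pass inplace=True so the node may use an in-place
# C-API call such as PyNumber_InPlaceAdd() where one is available.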
#-------------------------------------------------------------------
#
# Coercion nodes
#
# Coercion nodes are special in that they are created during
# the analyse_types phase of parse tree processing.
# Their __init__ methods consequently incorporate some aspects
# of that phase.
#
#-------------------------------------------------------------------
class CoercionNode(ExprNode):
# Abstract base class for coercion nodes.
#
# arg ExprNode node being coerced
subexprs = ['arg']
constant_result = not_a_constant
def __init__(self, arg):
super(CoercionNode, self).__init__(arg.pos)
self.arg = arg
if debug_coercion:
print("%s Coercing %s" % (self, self.arg))
def calculate_constant_result(self):
# constant folding can break type coercion, so this is disabled
pass
def annotate(self, code):
self.arg.annotate(code)
if self.arg.type != self.type:
file, line, col = self.pos
code.annotate((file, line, col-1), AnnotationItem(
style='coerce', tag='coerce', text='[%s] to [%s]' % (self.arg.type, self.type)))
class CoerceToMemViewSliceNode(CoercionNode):
"""
Coerce an object to a memoryview slice. This holds a new reference in
a managed temp.
"""
def __init__(self, arg, dst_type, env):
assert dst_type.is_memoryviewslice
assert not arg.type.is_memoryviewslice
CoercionNode.__init__(self, arg)
self.type = dst_type
self.is_temp = 1
self.env = env
self.use_managed_ref = True
self.arg = arg
def generate_result_code(self, code):
self.type.create_from_py_utility_code(self.env)
code.putln("%s = %s(%s);" % (self.result(),
self.type.from_py_function,
self.arg.py_result()))
error_cond = self.type.error_condition(self.result())
code.putln(code.error_goto_if(error_cond, self.pos))
class CastNode(CoercionNode):
# Wrap a node in a C type cast.
def __init__(self, arg, new_type):
CoercionNode.__init__(self, arg)
self.type = new_type
def may_be_none(self):
return self.arg.may_be_none()
def calculate_result_code(self):
return self.arg.result_as(self.type)
def generate_result_code(self, code):
self.arg.generate_result_code(code)
class PyTypeTestNode(CoercionNode):
# This node is used to check that a generic Python
# object is an instance of a particular extension type.
# This node borrows the result of its argument node.
exact_builtin_type = True
def __init__(self, arg, dst_type, env, notnone=False):
        # The arg is known to be a Python object, and
# the dst_type is known to be an extension type.
assert dst_type.is_extension_type or dst_type.is_builtin_type, "PyTypeTest on non extension type"
CoercionNode.__init__(self, arg)
self.type = dst_type
self.result_ctype = arg.ctype()
self.notnone = notnone
nogil_check = Node.gil_error
gil_message = "Python type test"
def analyse_types(self, env):
return self
def may_be_none(self):
if self.notnone:
return False
return self.arg.may_be_none()
def is_simple(self):
return self.arg.is_simple()
def result_in_temp(self):
return self.arg.result_in_temp()
def is_ephemeral(self):
return self.arg.is_ephemeral()
def nonlocally_immutable(self):
return self.arg.nonlocally_immutable()
def calculate_constant_result(self):
# FIXME
pass
def calculate_result_code(self):
return self.arg.result()
def generate_result_code(self, code):
if self.type.typeobj_is_available():
if self.type.is_builtin_type:
type_test = self.type.type_test_code(
self.arg.py_result(),
self.notnone, exact=self.exact_builtin_type)
else:
type_test = self.type.type_test_code(
self.arg.py_result(), self.notnone)
code.globalstate.use_utility_code(
UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c"))
code.putln("if (!(%s)) %s" % (
type_test, code.error_goto(self.pos)))
else:
error(self.pos, "Cannot test type of extern C class "
"without type object name specification")
def generate_post_assignment_code(self, code):
self.arg.generate_post_assignment_code(code)
def free_temps(self, code):
self.arg.free_temps(code)
class NoneCheckNode(CoercionNode):
# This node is used to check that a Python object is not None and
# raises an appropriate exception (as specified by the creating
# transform).
is_nonecheck = True
def __init__(self, arg, exception_type_cname, exception_message,
exception_format_args):
CoercionNode.__init__(self, arg)
self.type = arg.type
self.result_ctype = arg.ctype()
self.exception_type_cname = exception_type_cname
self.exception_message = exception_message
self.exception_format_args = tuple(exception_format_args or ())
nogil_check = None # this node only guards an operation that would fail already
def analyse_types(self, env):
return self
def may_be_none(self):
return False
def is_simple(self):
return self.arg.is_simple()
def result_in_temp(self):
return self.arg.result_in_temp()
def nonlocally_immutable(self):
return self.arg.nonlocally_immutable()
def calculate_result_code(self):
return self.arg.result()
def condition(self):
if self.type.is_pyobject:
return self.arg.py_result()
elif self.type.is_memoryviewslice:
return "((PyObject *) %s.memview)" % self.arg.result()
else:
raise Exception("unsupported type")
def put_nonecheck(self, code):
code.putln(
"if (unlikely(%s == Py_None)) {" % self.condition())
if self.in_nogil_context:
code.put_ensure_gil()
escape = StringEncoding.escape_byte_string
if self.exception_format_args:
code.putln('PyErr_Format(%s, "%s", %s);' % (
self.exception_type_cname,
StringEncoding.escape_byte_string(
self.exception_message.encode('UTF-8')),
', '.join([ '"%s"' % escape(str(arg).encode('UTF-8'))
for arg in self.exception_format_args ])))
else:
code.putln('PyErr_SetString(%s, "%s");' % (
self.exception_type_cname,
escape(self.exception_message.encode('UTF-8'))))
if self.in_nogil_context:
code.put_release_ensured_gil()
code.putln(code.error_goto(self.pos))
code.putln("}")
def generate_result_code(self, code):
self.put_nonecheck(code)
def generate_post_assignment_code(self, code):
self.arg.generate_post_assignment_code(code)
def free_temps(self, code):
self.arg.free_temps(code)
class CoerceToPyTypeNode(CoercionNode):
# This node is used to convert a C data type
# to a Python object.
type = py_object_type
is_temp = 1
def __init__(self, arg, env, type=py_object_type):
if not arg.type.create_to_py_utility_code(env):
error(arg.pos, "Cannot convert '%s' to Python object" % arg.type)
elif arg.type.is_complex:
# special case: complex coercion is so complex that it
# uses a macro ("__pyx_PyComplex_FromComplex()"), for
# which the argument must be simple
arg = arg.coerce_to_simple(env)
CoercionNode.__init__(self, arg)
if type is py_object_type:
# be specific about some known types
if arg.type.is_string or arg.type.is_cpp_string:
self.type = default_str_type(env)
elif arg.type.is_pyunicode_ptr or arg.type.is_unicode_char:
self.type = unicode_type
elif arg.type.is_complex:
self.type = Builtin.complex_type
elif arg.type.is_string or arg.type.is_cpp_string:
if (type not in (bytes_type, bytearray_type)
and not env.directives['c_string_encoding']):
error(arg.pos,
"default encoding required for conversion from '%s' to '%s'" %
(arg.type, type))
self.type = type
else:
# FIXME: check that the target type and the resulting type are compatible
pass
if arg.type.is_memoryviewslice:
# Register utility codes at this point
arg.type.get_to_py_function(env, arg)
self.env = env
gil_message = "Converting to Python object"
def may_be_none(self):
# FIXME: is this always safe?
return False
def coerce_to_boolean(self, env):
arg_type = self.arg.type
if (arg_type == PyrexTypes.c_bint_type or
(arg_type.is_pyobject and arg_type.name == 'bool')):
return self.arg.coerce_to_temp(env)
else:
return CoerceToBooleanNode(self, env)
def coerce_to_integer(self, env):
# If not already some C integer type, coerce to longint.
if self.arg.type.is_int:
return self.arg
else:
return self.arg.coerce_to(PyrexTypes.c_long_type, env)
def analyse_types(self, env):
# The arg is always already analysed
return self
def generate_result_code(self, code):
arg_type = self.arg.type
if arg_type.is_memoryviewslice:
funccall = arg_type.get_to_py_function(self.env, self.arg)
else:
func = arg_type.to_py_function
if arg_type.is_string or arg_type.is_cpp_string:
if self.type in (bytes_type, str_type, unicode_type):
func = func.replace("Object", self.type.name.title())
elif self.type is bytearray_type:
func = func.replace("Object", "ByteArray")
funccall = "%s(%s)" % (func, self.arg.result())
code.putln('%s = %s; %s' % (
self.result(),
funccall,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class CoerceIntToBytesNode(CoerceToPyTypeNode):
# This node is used to convert a C int type to a Python bytes
# object.
is_temp = 1
def __init__(self, arg, env):
arg = arg.coerce_to_simple(env)
CoercionNode.__init__(self, arg)
self.type = Builtin.bytes_type
def generate_result_code(self, code):
arg = self.arg
arg_result = arg.result()
if arg.type not in (PyrexTypes.c_char_type,
PyrexTypes.c_uchar_type,
PyrexTypes.c_schar_type):
if arg.type.signed:
code.putln("if ((%s < 0) || (%s > 255)) {" % (
arg_result, arg_result))
else:
code.putln("if (%s > 255) {" % arg_result)
code.putln('PyErr_SetString(PyExc_OverflowError, '
'"value too large to pack into a byte"); %s' % (
code.error_goto(self.pos)))
code.putln('}')
temp = None
if arg.type is not PyrexTypes.c_char_type:
temp = code.funcstate.allocate_temp(PyrexTypes.c_char_type, manage_ref=False)
code.putln("%s = (char)%s;" % (temp, arg_result))
arg_result = temp
code.putln('%s = PyBytes_FromStringAndSize(&%s, 1); %s' % (
self.result(),
arg_result,
code.error_goto_if_null(self.result(), self.pos)))
if temp is not None:
code.funcstate.release_temp(temp)
code.put_gotref(self.py_result())
class CoerceFromPyTypeNode(CoercionNode):
# This node is used to convert a Python object
# to a C data type.
def __init__(self, result_type, arg, env):
CoercionNode.__init__(self, arg)
self.type = result_type
self.is_temp = 1
if not result_type.create_from_py_utility_code(env):
error(arg.pos,
"Cannot convert Python object to '%s'" % result_type)
if self.type.is_string or self.type.is_pyunicode_ptr:
if self.arg.is_ephemeral():
error(arg.pos,
"Obtaining '%s' from temporary Python value" % result_type)
elif self.arg.is_name and self.arg.entry and self.arg.entry.is_pyglobal:
warning(arg.pos,
"Obtaining '%s' from externally modifiable global Python value" % result_type,
level=1)
def analyse_types(self, env):
# The arg is always already analysed
return self
def generate_result_code(self, code):
function = self.type.from_py_function
operand = self.arg.py_result()
rhs = "%s(%s)" % (function, operand)
if self.type.is_enum:
rhs = typecast(self.type, c_long_type, rhs)
code.putln('%s = %s; %s' % (
self.result(),
rhs,
code.error_goto_if(self.type.error_condition(self.result()), self.pos)))
if self.type.is_pyobject:
code.put_gotref(self.py_result())
def nogil_check(self, env):
error(self.pos, "Coercion from Python not allowed without the GIL")
class CoerceToBooleanNode(CoercionNode):
# This node is used when a result needs to be used
# in a boolean context.
type = PyrexTypes.c_bint_type
_special_builtins = {
Builtin.list_type : 'PyList_GET_SIZE',
Builtin.tuple_type : 'PyTuple_GET_SIZE',
Builtin.bytes_type : 'PyBytes_GET_SIZE',
Builtin.unicode_type : 'PyUnicode_GET_SIZE',
}
def __init__(self, arg, env):
CoercionNode.__init__(self, arg)
if arg.type.is_pyobject:
self.is_temp = 1
def nogil_check(self, env):
if self.arg.type.is_pyobject and self._special_builtins.get(self.arg.type) is None:
self.gil_error()
gil_message = "Truth-testing Python object"
def check_const(self):
if self.is_temp:
self.not_const()
return False
return self.arg.check_const()
def calculate_result_code(self):
return "(%s != 0)" % self.arg.result()
def generate_result_code(self, code):
if not self.is_temp:
return
test_func = self._special_builtins.get(self.arg.type)
if test_func is not None:
code.putln("%s = (%s != Py_None) && (%s(%s) != 0);" % (
self.result(),
self.arg.py_result(),
test_func,
self.arg.py_result()))
else:
code.putln(
"%s = __Pyx_PyObject_IsTrue(%s); %s" % (
self.result(),
self.arg.py_result(),
code.error_goto_if_neg(self.result(), self.pos)))
class CoerceToComplexNode(CoercionNode):
def __init__(self, arg, dst_type, env):
if arg.type.is_complex:
arg = arg.coerce_to_simple(env)
self.type = dst_type
CoercionNode.__init__(self, arg)
dst_type.create_declaration_utility_code(env)
def calculate_result_code(self):
if self.arg.type.is_complex:
real_part = "__Pyx_CREAL(%s)" % self.arg.result()
imag_part = "__Pyx_CIMAG(%s)" % self.arg.result()
else:
real_part = self.arg.result()
imag_part = "0"
return "%s(%s, %s)" % (
self.type.from_parts,
real_part,
imag_part)
def generate_result_code(self, code):
pass
class CoerceToTempNode(CoercionNode):
# This node is used to force the result of another node
# to be stored in a temporary. It is only used if the
# argument node's result is not already in a temporary.
def __init__(self, arg, env):
CoercionNode.__init__(self, arg)
self.type = self.arg.type.as_argument_type()
self.constant_result = self.arg.constant_result
self.is_temp = 1
if self.type.is_pyobject:
self.result_ctype = py_object_type
gil_message = "Creating temporary Python reference"
def analyse_types(self, env):
# The arg is always already analysed
return self
def coerce_to_boolean(self, env):
self.arg = self.arg.coerce_to_boolean(env)
if self.arg.is_simple():
return self.arg
self.type = self.arg.type
self.result_ctype = self.type
return self
def generate_result_code(self, code):
#self.arg.generate_evaluation_code(code) # Already done
# by generic generate_subexpr_evaluation_code!
code.putln("%s = %s;" % (
self.result(), self.arg.result_as(self.ctype())))
if self.use_managed_ref:
if self.type.is_pyobject:
code.put_incref(self.result(), self.ctype())
elif self.type.is_memoryviewslice:
code.put_incref_memoryviewslice(self.result(),
not self.in_nogil_context)
class ProxyNode(CoercionNode):
"""
A node that should not be replaced by transforms or other means,
and hence can be useful to wrap the argument to a clone node
MyNode -> ProxyNode -> ArgNode
CloneNode -^
"""
nogil_check = None
def __init__(self, arg):
super(ProxyNode, self).__init__(arg)
self.constant_result = arg.constant_result
self._proxy_type()
def analyse_expressions(self, env):
self.arg = self.arg.analyse_expressions(env)
self._proxy_type()
return self
def _proxy_type(self):
if hasattr(self.arg, 'type'):
self.type = self.arg.type
self.result_ctype = self.arg.result_ctype
if hasattr(self.arg, 'entry'):
self.entry = self.arg.entry
def result(self):
return self.arg.result()
def is_simple(self):
return self.arg.is_simple()
def may_be_none(self):
return self.arg.may_be_none()
def generate_evaluation_code(self, code):
self.arg.generate_evaluation_code(code)
def generate_result_code(self, code):
self.arg.generate_result_code(code)
def generate_disposal_code(self, code):
self.arg.generate_disposal_code(code)
def free_temps(self, code):
self.arg.free_temps(code)
class CloneNode(CoercionNode):
# This node is employed when the result of another node needs
# to be used multiple times. The argument node's result must
# be in a temporary. This node "borrows" the result from the
# argument node, and does not generate any evaluation or
# disposal code for it. The original owner of the argument
# node is responsible for doing those things.
subexprs = [] # Arg is not considered a subexpr
nogil_check = None
def __init__(self, arg):
CoercionNode.__init__(self, arg)
self.constant_result = arg.constant_result
if hasattr(arg, 'type'):
self.type = arg.type
self.result_ctype = arg.result_ctype
if hasattr(arg, 'entry'):
self.entry = arg.entry
def result(self):
return self.arg.result()
def may_be_none(self):
return self.arg.may_be_none()
def type_dependencies(self, env):
return self.arg.type_dependencies(env)
def infer_type(self, env):
return self.arg.infer_type(env)
def analyse_types(self, env):
self.type = self.arg.type
self.result_ctype = self.arg.result_ctype
self.is_temp = 1
if hasattr(self.arg, 'entry'):
self.entry = self.arg.entry
return self
def is_simple(self):
return True # result is always in a temp (or a name)
def generate_evaluation_code(self, code):
pass
def generate_result_code(self, code):
pass
def generate_disposal_code(self, code):
pass
def free_temps(self, code):
pass
class CMethodSelfCloneNode(CloneNode):
# Special CloneNode for the self argument of builtin C methods
# that accepts subtypes of the builtin type. This is safe only
# for 'final' subtypes, as subtypes of the declared type may
# override the C method.
def coerce_to(self, dst_type, env):
if dst_type.is_builtin_type and self.type.subtype_of(dst_type):
return self
return CloneNode.coerce_to(self, dst_type, env)
class ModuleRefNode(ExprNode):
    # Simply returns the module object
type = py_object_type
is_temp = False
subexprs = []
def analyse_types(self, env):
return self
def may_be_none(self):
return False
def calculate_result_code(self):
return Naming.module_cname
def generate_result_code(self, code):
pass
class DocstringRefNode(ExprNode):
# Extracts the docstring of the body element
subexprs = ['body']
type = py_object_type
is_temp = True
def __init__(self, pos, body):
ExprNode.__init__(self, pos)
assert body.type.is_pyobject
self.body = body
def analyse_types(self, env):
return self
def generate_result_code(self, code):
code.putln('%s = __Pyx_GetAttr(%s, %s); %s' % (
self.result(), self.body.result(),
code.intern_identifier(StringEncoding.EncodedString("__doc__")),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
#------------------------------------------------------------------------------------
#
# Runtime support code
#
#------------------------------------------------------------------------------------
pyerr_occurred_withgil_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void); /* proto */
""",
impl = """
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void) {
int err;
#ifdef WITH_THREAD
PyGILState_STATE _save = PyGILState_Ensure();
#endif
err = !!PyErr_Occurred();
#ifdef WITH_THREAD
PyGILState_Release(_save);
#endif
return err;
}
"""
)
#------------------------------------------------------------------------------------
raise_unbound_local_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
""")
raise_closure_name_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname);
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname) {
PyErr_Format(PyExc_NameError, "free variable '%s' referenced before assignment in enclosing scope", varname);
}
""")
# Don't inline the function; it should really never be called in production.
raise_unbound_memoryview_utility_code_nogil = UtilityCode(
proto = """
static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname);
""",
impl = """
static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname) {
#ifdef WITH_THREAD
PyGILState_STATE gilstate = PyGILState_Ensure();
#endif
__Pyx_RaiseUnboundLocalError(varname);
#ifdef WITH_THREAD
PyGILState_Release(gilstate);
#endif
}
""",
requires = [raise_unbound_local_error_utility_code])
#------------------------------------------------------------------------------------
raise_too_many_values_to_unpack = UtilityCode.load_cached("RaiseTooManyValuesToUnpack", "ObjectHandling.c")
raise_need_more_values_to_unpack = UtilityCode.load_cached("RaiseNeedMoreValuesToUnpack", "ObjectHandling.c")
tuple_unpacking_error_code = UtilityCode.load_cached("UnpackTupleError", "ObjectHandling.c")
#------------------------------------------------------------------------------------
int_pow_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s %(func_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s %(func_name)s(%(type)s b, %(type)s e) {
%(type)s t = b;
switch (e) {
case 3:
t *= b;
case 2:
t *= b;
case 1:
return t;
case 0:
return 1;
}
#if %(signed)s
if (unlikely(e<0)) return 0;
#endif
t = 1;
while (likely(e)) {
t *= (b * (e&1)) | ((~e)&1); /* 1 or b */
b *= b;
e >>= 1;
}
return t;
}
""")
# ------------------------------ Division ------------------------------------
div_int_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s __Pyx_div_%(type_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s __Pyx_div_%(type_name)s(%(type)s a, %(type)s b) {
%(type)s q = a / b;
%(type)s r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
""")
mod_int_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s a, %(type)s b) {
%(type)s r = a %% b;
r += ((r != 0) & ((r ^ b) < 0)) * b;
return r;
}
""")
mod_float_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s a, %(type)s b) {
%(type)s r = fmod%(math_h_modifier)s(a, b);
r += ((r != 0) & ((r < 0) ^ (b < 0))) * b;
return r;
}
""")
cdivision_warning_utility_code = UtilityCode(
proto="""
static int __Pyx_cdivision_warning(const char *, int); /* proto */
""",
impl="""
static int __Pyx_cdivision_warning(const char *filename, int lineno) {
#if CYTHON_COMPILING_IN_PYPY
filename++; // avoid compiler warnings
lineno++;
return PyErr_Warn(PyExc_RuntimeWarning,
"division with oppositely signed operands, C and Python semantics differ");
#else
return PyErr_WarnExplicit(PyExc_RuntimeWarning,
"division with oppositely signed operands, C and Python semantics differ",
filename,
lineno,
__Pyx_MODULE_NAME,
NULL);
#endif
}
""")
# from intobject.c
division_overflow_test_code = UtilityCode(
proto="""
#define UNARY_NEG_WOULD_OVERFLOW(x) \
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
""")
|
openilabs/falconlab
|
env/lib/python2.7/site-packages/Cython/Compiler/ExprNodes.py
|
Python
|
mit
| 439,907
|
[
"VisIt"
] |
309ac6642e0fa3d151dc9a2052f788b568d687f61e8582c82e818125e32f1e86
|
"""
Daemon to monitor a file hierarchy and create sequence reports.
Any time 'workup.json' or any .ab1 file is added to a directory, check
if there is a usable set of files in that directory. If so, generate a
sequence report for them.
"""
import argparse
import os
import pydaemonize
import pyinotify
import syslog
import json
import seqlab.sequence_report as sr
import seqlab.config as cf
def try_report(path, omit_blast):
files = os.listdir(path)
ab1s = [x for x in files if x.endswith('.ab1')]
if 'workup.json' in files and len(ab1s) >= 2:
with open(os.path.join(path,'workup.json')) as h:
workup = json.load(h)
try:
syslog.syslog(syslog.LOG_NOTICE, 'Building sequence report in %s' % (path,))
fate, body = sr.sequence_report((workup, \
os.path.join(path,ab1s[0]),
os.path.join(path, ab1s[1])),
omit_blast)
except Exception, ex:
return False, str(ex)
        if fate == 'assembled':
            output_filename = os.path.join(path, 'assembly_report.html')
        elif fate == 'strandwise':
            # both fates currently share one report filename
            output_filename = os.path.join(path, 'assembly_report.html')
        else:
            return False, 'Unexpected report fate: %s' % (fate,)
with open(output_filename, 'w') as output:
print >>output, body
return True, output_filename
else:
return False, 'Not a full complement of files.'
class SequenceReportDaemon(pydaemonize.Daemon):
def __init__(self, config_path='/etc/seqlab.conf', omit_blast=False,
*args, **kwargs):
self.config_path = config_path
self.omit_blast = omit_blast
pydaemonize.Daemon.__init__(self, *args, **kwargs)
def action(self):
omit_blast = self.omit_blast
with open(self.config_path) as h:
config = cf.read_configuration(h)
monitor_path = config['target_path']
syslog.syslog(syslog.LOG_NOTICE, "sequencereportd monitoring %s for runs to process." % monitor_path)
class Handler(pyinotify.ProcessEvent):
def process_IN_UNMOUNT(self, event):
syslog.syslog(syslog.LOG_NOTICE, "Backing filesystem of %s was unmounted. Exiting." % \
(event.path,))
exit(0)
def process_default(self, event):
syslog.syslog(syslog.LOG_NOTICE, "Event on %s in monitored share." % (event.pathname,))
if event.name != 'workup.json' and \
not event.name.endswith('.ab1'):
syslog.syslog(syslog.LOG_NOTICE, "Ignoring event on %s." % (event.pathname,))
return
wrote, result = try_report(event.path, omit_blast)
if not wrote:
syslog.syslog(syslog.LOG_NOTICE, "No action in %s: %s" % \
(event.path,result))
else:
syslog.syslog(syslog.LOG_NOTICE, "Wrote report in %s." % (result,))
wm = pyinotify.WatchManager()
notifier = pyinotify.Notifier(wm, Handler())
wm.add_watch(monitor_path,
pyinotify.IN_CREATE |
pyinotify.IN_DELETE |
pyinotify.IN_MOVED_TO |
pyinotify.IN_ATTRIB |
pyinotify.IN_MODIFY,
rec=True)
notifier.loop()
def main(args=None):
    parser = argparse.ArgumentParser(description='Sequence report generation daemon')
    parser.add_argument('-c', '--config', default='/etc/seqlab.conf',
                        help="Config file to read (default: /etc/seqlab.conf)")
    parser.add_argument('--noblast', action='store_true',
                        help="Don't run BLAST")
    opts = parser.parse_args(args)
    daemon = SequenceReportDaemon(config_path=opts.config,
                                  omit_blast=opts.noblast)
    exit(0)
|
madhadron/seqlabd
|
seqlab/daemons/sequencereportd.py
|
Python
|
gpl-3.0
| 3,937
|
[
"BLAST"
] |
4af8127914fe43e5de6bb15d9c2e75f6effefce958d35152e3295649d89d23ce
|
import mysql.connector
def migration_name():
return "Adding crystal storage columns to char_points table"
def check_preconditions(cur):
    return
def needs_to_run(cur):
# Ensure crystal columns exist in char_points
cur.execute("SHOW COLUMNS FROM char_points LIKE 'fire_crystals'")
if not cur.fetchone():
return True
return False
def migrate(cur, db):
try:
cur.execute("ALTER TABLE char_points \
ADD COLUMN `fire_crystals` smallint(5) unsigned NOT NULL DEFAULT 0, \
ADD COLUMN `ice_crystals` smallint(5) unsigned NOT NULL DEFAULT 0, \
ADD COLUMN `wind_crystals` smallint(5) unsigned NOT NULL DEFAULT 0, \
ADD COLUMN `earth_crystals` smallint(5) unsigned NOT NULL DEFAULT 0, \
ADD COLUMN `lightning_crystals` smallint(5) unsigned NOT NULL DEFAULT 0, \
ADD COLUMN `water_crystals` smallint(5) unsigned NOT NULL DEFAULT 0, \
ADD COLUMN `light_crystals` smallint(5) unsigned NOT NULL DEFAULT 0, \
ADD COLUMN `dark_crystals` smallint(5) unsigned NOT NULL DEFAULT 0;")
db.commit()
except mysql.connector.Error as err:
print("Something went wrong: {}".format(err))
|
ffxijuggalo/darkstar
|
migrations/crystal_storage.py
|
Python
|
gpl-3.0
| 1,103
|
[
"CRYSTAL"
] |
6ccddf1b2694301eb6bf4427f3ff23d91b12d57723c09dd30c07aa1b89acdc60
|
from distutils.core import setup
import os
def version():
setupDir = os.path.dirname(os.path.realpath(__file__))
versionFile = open(os.path.join(setupDir, 'checkm', 'VERSION'))
return versionFile.read().strip()
setup(
name='checkm-genome',
version=version(),
author='Donovan Parks, Michael Imelfort, Connor Skennerton',
author_email='donovan.parks@gmail.com',
packages=['checkm', 'checkm.plot', 'checkm.test', 'checkm.util'],
scripts=['bin/checkm'],
package_data={'checkm': ['VERSION', 'DATA_CONFIG']},
url='http://pypi.python.org/pypi/checkm/',
license='GPL3',
description='Assess the quality of putative genome bins.',
long_description=open('README.txt').read(),
install_requires=[
"numpy >= 1.8.0",
"scipy >= 0.9.0",
"matplotlib >= 1.3.1",
"pysam >= 0.7.4",
"dendropy >= 4.0.0",
"ScreamingBackpack >= 0.2.333"],
)
|
hunter-cameron/CheckM
|
setup.py
|
Python
|
gpl-3.0
| 929
|
[
"pysam"
] |
78a3508751d7b187101e03d8b166297c29ccb5f513a969311b69c6e5d2dc364c
|
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
# Fancy plotting
try:
import seaborn as sns
sns.set_style("white")
sns.set_context("talk")
color_names = ["windows blue",
"red",
"amber",
"faded green",
"dusty purple",
"crimson",
"greyish"]
colors = sns.xkcd_palette(color_names)
except Exception:
    colors = ['b', 'r', 'y', 'g']
from pybasicbayes.util.text import progprint_xrange
from pylds.models import DefaultLDS
npr.seed(3)
# Set parameters
D_obs = 1
D_latent = 2
D_input = 0
T = 2000
# Simulate from one LDS
true_model = DefaultLDS(D_obs, D_latent, D_input, sigma_obs=np.eye(D_obs))
inputs = npr.randn(T, D_input)
data, stateseq = true_model.generate(T, inputs=inputs)
# Fit with another LDS
test_model = DefaultLDS(D_obs, D_latent, D_input)
test_model.add_data(data, inputs=inputs)
# Run the Gibbs sampler
N_samples = 100
def update(model):
model.resample_model()
return model.log_likelihood()
lls = [update(test_model) for _ in progprint_xrange(N_samples)]
# Plot the log likelihoods
plt.figure(figsize=(5,3))
plt.plot([0, N_samples], true_model.log_likelihood() * np.ones(2), '--k', label="true")
plt.plot(np.arange(N_samples), lls, color=colors[0], label="test")
plt.xlabel('iteration')
plt.ylabel('training likelihood')
plt.legend(loc="lower right")
plt.tight_layout()
plt.savefig("aux/demo_ll.png")
# Smooth the data
smoothed_data = test_model.smooth(data, inputs)
plt.figure(figsize=(5,3))
plt.plot(data, color=colors[0], lw=2, label="observed")
plt.plot(smoothed_data, color=colors[1], lw=1, label="smoothed")
plt.xlabel("Time")
plt.xlim(0, min(T, 500))
plt.ylabel("Smoothed Data")
plt.ylim(1.2 * np.array(plt.ylim()))
plt.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.savefig("aux/demo_smooth.png")
plt.show()
|
mattjj/pylds
|
examples/simple_demo.py
|
Python
|
mit
| 1,912
|
[
"Amber"
] |
a35405ebbb592cf4039fa133a978a9b76dbb1bd165faa035d58bf67dfd07f823
|
stations = { 'abagaqi': 'AQC',
'acheng': 'ACB',
'aershan': 'ART',
'aershanbei': 'ARX',
'aihe': 'AHP',
'aijiacun': 'AJJ',
'ajin': 'AJD',
'akesu': 'ASR',
'aketao': 'AER',
'alashankou': 'AKR',
'alihe': 'AHX',
'alongshan': 'ASX',
'amuer': 'JTX',
'ananzhuang': 'AZM',
'anda': 'ADX',
'ande': 'ARW',
'anding': 'ADP',
'angangxi': 'AAX',
'anguang': 'AGT',
'anhua': 'PKQ',
'anjia': 'AJB',
'ankang': 'AKY',
'ankouyao': 'AYY',
'anlong': 'AUZ',
'anlu': 'ALN',
'anping': 'APT',
'anqing': 'AQH',
'anqingxi': 'APH',
'anren': 'ARG',
'anshan': 'AST',
'anshanxi': 'AXT',
'anshun': 'ASW',
'anshunxi': 'ASE',
'antang': 'ATV',
'antingbei': 'ASH',
'antu': 'ATL',
'antuxi': 'AXL',
'anxi': 'AXS',
'anyang': 'AYF',
'anyangdong': 'ADF',
'aojiang': 'ARH',
'aolibugao': 'ALD',
'atushi': 'ATR',
'babu': 'BBE',
'bachu': 'BCR',
'badaling': 'ILP',
'badong': 'BNN',
'baibiguan': 'BGV',
'baicheng': 'BCT',
'baigou': 'FEP',
'baiguo': 'BGM',
'baihe': 'BEL',
'baihedong': 'BIY',
'baihexian': 'BEY',
'baijian': 'BAP',
'baijigou': 'BJJ',
'baijipo': 'BBM',
'baikuipu': 'BKB',
'bailang': 'BRZ',
'bailixia': 'AAP',
'baimajing': 'BFQ',
'baiqi': 'BQP',
'baiquan': 'BQL',
'baise': 'BIZ',
'baisha': 'BSW',
'baishanshi': 'HJL',
'baishapo': 'BPM',
'baishishan': 'BAL',
'baishuijiang': 'BSY',
'baishuixian': 'BGY',
'baishuizhen': 'BUM',
'baiyangdian': 'FWP',
'baiyi': 'FHW',
'baiyinchagan': 'BYC',
'baiyinhuanan': 'FNC',
'baiyinhushuo': 'BCD',
'baiyinshi': 'BNJ',
'baiyintala': 'BID',
'baiyinxi': 'BXJ',
'baiyunebo': 'BEC',
'bajiaotai': 'BTD',
'balin': 'BLX',
'bamiancheng': 'BMD',
'bamiantong': 'BMB',
'bancheng': 'BUP',
'banmaoqing': 'BNM',
'bantian': 'BTQ',
'baodi': 'BPP',
'baoding': 'BDP',
'baodingdong': 'BMP',
'baohuashan': 'BWH',
'baoji': 'BJY',
'baojinan': 'BBY',
'baokang': 'BKD',
'baolage': 'BQC',
'baolin': 'BNB',
'baolongshan': 'BND',
'baoqing': 'BUB',
'baoquanling': 'BQB',
'baotou': 'BTC',
'baotoudong': 'FDC',
'bashan': 'BAY',
'baxiantong': 'VXD',
'bayangaole': 'BAC',
'bayuquan': 'BYT',
'bazhong': 'IEW',
'bazhongdong': 'BDE',
'bazhou': 'RMP',
'bazhouxi': 'FOP',
'beian': 'BAB',
'beibei': 'BPW',
'beidaihe': 'BEP',
'beihai': 'BHZ',
'beijiao': 'IBQ',
'beijing': 'BJP',
'beijingbei': 'VAP',
'beijingdong': 'BOP',
'beijingnan': 'VNP',
'beijingxi': 'BXP',
'beijingzi': 'BRT',
'beiliu': 'BOZ',
'beimaquanzi': 'BRP',
'beipiaonan': 'RPD',
'beitai': 'BTT',
'beitun': 'BYP',
'beitunshi': 'BXR',
'beiying': 'BIV',
'beiyinhe': 'BYB',
'beizhai': 'BVP',
'bencha': 'FWH',
'bengbu': 'BBH',
'bengbunan': 'BMH',
'benhong': 'BVC',
'benxi': 'BXT',
'benxihu': 'BHT',
'benxixincheng': 'BVT',
'bijiang': 'BLQ',
'bijiashan': 'BSB',
'bijiguan': 'BJM',
'binhai': 'FHP',
'binhaibei': 'FCP',
'binjiang': 'BJB',
'binxian': 'BXY',
'binyang': 'UKZ',
'binzhou': 'BIK',
'bishan': 'FZW',
'boao': 'BWQ',
'bobai': 'BBZ',
'boketu': 'BKX',
'bole': 'BOR',
'boli': 'BLB',
'botou': 'BZP',
'boxing': 'BXK',
'bozhou': 'BZH',
'buhai': 'BUT',
'buliekai': 'BLR',
'caijiagou': 'CJT',
'caijiapo': 'CJY',
'caishan': 'CON',
'cangnan': 'CEH',
'cangshi': 'CST',
'cangxi': 'CXE',
'cangzhou': 'COP',
'cangzhouxi': 'CBP',
'caohai': 'WBW',
'caohekou': 'CKT',
'caoshi': 'CSL',
'caoxian': 'CXK',
'caozili': 'CFP',
'ceheng': 'CHZ',
'cenxi': 'CNZ',
'chabuga': 'CBC',
'chaigang': 'CGT',
'chaigoupu': 'CGV',
'chaihe': 'CHB',
'chajiang': 'CAM',
'chaka': 'CVO',
'chaling': 'CDG',
'chalingnan': 'CNG',
'changcheng': 'CEJ',
'changchong': 'CCM',
'changchun': 'CCT',
'changchunnan': 'CET',
'changchunxi': 'CRT',
'changde': 'VGQ',
'changdian': 'CDT',
'changge': 'CEF',
'changle': 'CLK',
'changli': 'CLP',
'changlingzi': 'CLT',
'changlinhe': 'FVH',
'changnong': 'CNJ',
'changping': 'CPP',
'changpingbei': 'VBP',
'changpingdong': 'FQQ',
'changpoling': 'CPM',
'changqingqiao': 'CQJ',
'changsha': 'CSQ',
'changshanan': 'CWQ',
'changshantun': 'CVT',
'changshou': 'EFW',
'changshoubei': 'COW',
'changshouhu': 'CSE',
'changting': 'CES',
'changtingnan': 'CNS',
'changtingzhen': 'CDB',
'changtu': 'CTT',
'changtuxi': 'CPT',
'changwu': 'CWY',
'changxing': 'CBH',
'changxingnan': 'CFH',
'changyang': 'CYN',
'changyuan': 'CYF',
'changzheng': 'CZJ',
'changzhi': 'CZF',
'changzhibei': 'CBF',
'changzhou': 'CZH',
'changzhoubei': 'ESH',
'changzhuang': 'CVK',
'chaohu': 'CIH',
'chaohudong': 'GUH',
'chaolianggou': 'CYP',
'chaoshan': 'CBQ',
'chaoyang': 'CYD',
'chaoyangchuan': 'CYL',
'chaoyangdi': 'CDD',
'chaoyangzhen': 'CZL',
'chaozhou': 'CKQ',
'chasuqi': 'CSC',
'chengcheng': 'CUY',
'chengde': 'CDP',
'chengdedong': 'CCP',
'chengdu': 'CDW',
'chengdudong': 'ICW',
'chengdunan': 'CNW',
'chenggaozi': 'CZB',
'chenggu': 'CGY',
'chengjisihan': 'CJX',
'chenguanying': 'CAJ',
'chengyang': 'CEK',
'chengzitan': 'CWT',
'chenming': 'CMB',
'chenqing': 'CQB',
'chenxi': 'CXQ',
'chenxiangtun': 'CXT',
'chenzhou': 'CZQ',
'chenzhouxi': 'ICQ',
'chezhuanwan': 'CWM',
'chibi': 'CBN',
'chibibei': 'CIN',
'chifeng': 'CFD',
'chifengxi': 'CID',
'chizhou': 'IYH',
'chongqing': 'CQW',
'chongqingbei': 'CUW',
'chongqingnan': 'CRW',
'chongren': 'CRG',
'chongzuo': 'CZZ',
'chuangyecun': 'CEX',
'chunwan': 'CQQ',
'chunyang': 'CAL',
'chushan': 'CSB',
'chuxiong': 'COM',
'chuzhou': 'CXH',
'chuzhoubei': 'CUH',
'cili': 'CUQ',
'cishan': 'CSP',
'cixi': 'CRP',
'cixian': 'CIP',
'ciyao': 'CYK',
'congjiang': 'KNW',
'cuihuangkou': 'CHP',
'cuogang': 'CAX',
'daan': 'RAT',
'daanbei': 'RNT',
'daba': 'DBJ',
'daban': 'DBC',
'dachaigou': 'DGJ',
'dacheng': 'DCT',
'dadenggou': 'DKJ',
'dafangnan': 'DNE',
'daguan': 'RGW',
'daguantun': 'DTT',
'dagushan': 'RMT',
'dahongqi': 'DQD',
'dahuichang': 'DHP',
'dahushan': 'DHD',
'dailing': 'DLB',
'daixian': 'DKV',
'daiyue': 'RYV',
'dajiagou': 'DJT',
'dajian': 'DFP',
'daju': 'DIM',
'dakoutun': 'DKP',
'dalateqi': 'DIC',
'dalatexi': 'DNC',
'dali': 'DNY',
'dalian': 'DLT',
'dalianbei': 'DFT',
'dalin': 'DLD',
'daluhao': 'DLC',
'dandong': 'DUT',
'dandongxi': 'RWT',
'danfeng': 'DGY',
'dangshan': 'DKH',
'dangshannan': 'PRH',
'dangtudong': 'OWH',
'dangyang': 'DYN',
'dani': 'DNZ',
'dantu': 'RUH',
'danxiashan': 'IRQ',
'danyang': 'DYH',
'danyangbei': 'EXH',
'daobao': 'RBT',
'daoerdeng': 'DRD',
'daoqing': 'DML',
'daozhou': 'DFZ',
'dapanshi': 'RPP',
'dapingfang': 'DPD',
'dapu': 'DVT',
'daqilaha': 'DQX',
'daqing': 'DZX',
'daqingdong': 'LFX',
'daqinggou': 'DSD',
'daqingxi': 'RHX',
'dashiqiao': 'DQT',
'dashitou': 'DSL',
'dashitounan': 'DAL',
'dashizhai': 'RZT',
'datianbian': 'DBM',
'datong': 'DTV',
'datongxi': 'DTO',
'datun': 'DNT',
'dawang': 'WWQ',
'dawangtan': 'DZZ',
'dawanzi': 'DFM',
'dawukou': 'DFJ',
'daxing': 'DXX',
'daxinggou': 'DXL',
'dayan': 'DYX',
'dayangshu': 'DUX',
'dayebei': 'DBN',
'daying': 'DYV',
'dayingdong': 'IAW',
'dayingzhen': 'DJP',
'dayingzi': 'DZD',
'dayu': 'DYG',
'dayuan': 'DYZ',
'dazhanchang': 'DTJ',
'dazhangzi': 'DAP',
'dazhou': 'RXW',
'dazhuyuan': 'DZY',
'dazunan': 'FQW',
'dean': 'DAG',
'debao': 'RBZ',
'debosi': 'RDT',
'dechang': 'DVW',
'deerbuer': 'DRX',
'dehui': 'DHT',
'dehuixi': 'DXT',
'delingha': 'DHO',
'dengshahe': 'DWT',
'dengta': 'DGT',
'dengzhou': 'DOF',
'deqing': 'DRH',
'deqingxi': 'MOH',
'dexing': 'DWG',
'deyang': 'DYW',
'dezhou': 'DZP',
'dezhoudong': 'DIP',
'dianjiang': 'DJE',
'dianxin': 'DXM',
'didao': 'DDB',
'dingbian': 'DYJ',
'dinghudong': 'UWQ',
'dinghushan': 'NVQ',
'dingnan': 'DNG',
'dingtao': 'DQK',
'dingxi': 'DSJ',
'dingxiang': 'DXV',
'dingyuan': 'EWH',
'dingzhou': 'DXP',
'dingzhoudong': 'DOP',
'diwopu': 'DWJ',
'dizhuang': 'DVQ',
'dongandong': 'DCZ',
'dongbianjing': 'DBB',
'dongdaihe': 'RDD',
'dongerdaohe': 'DRB',
'dongfang': 'UFQ',
'dongfanghong': 'DFB',
'dongfeng': 'DIL',
'donggangbei': 'RGT',
'dongguan': 'RTQ',
'dongguandong': 'DMQ',
'dongguang': 'DGP',
'donghai': 'DHB',
'donghaixian': 'DQH',
'dongjin': 'DKB',
'dongjingcheng': 'DJB',
'donglai': 'RVD',
'dongmiaohe': 'DEP',
'dongmingcun': 'DMD',
'dongmingxian': 'DNF',
'dongsheng': 'DOC',
'dongshengxi': 'DYC',
'dongtai': 'DBH',
'dongtonghua': 'DTL',
'dongwan': 'DRJ',
'dongxiang': 'DXG',
'dongxinzhuang': 'DXD',
'dongxu': 'RXP',
'dongying': 'DPK',
'dongyingnan': 'DOK',
'dongyudi': 'DBV',
'dongzhen': 'DNV',
'dongzhi': 'DCH',
'dongzhuang': 'DZV',
'douluo': 'DLV',
'douzhangzhuang': 'RZP',
'douzhuang': 'ROP',
'duanzhou': 'WZQ',
'duge': 'DMM',
'duiqingshan': 'DQB',
'duizhen': 'DWV',
'dujia': 'DJL',
'dujiangyan': 'DDW',
'dulitun': 'DTX',
'dunhua': 'DHL',
'dunhuang': 'DHJ',
'dushan': 'RWW',
'dushupu': 'DPM',
'duyun': 'RYW',
'duyundong': 'KJW',
'ebian': 'EBW',
'eerduosi': 'EEC',
'ejina': 'EJC',
'emei': 'EMW',
'emeishan': 'IXW',
'enshi': 'ESN',
'erdaogoumen': 'RDP',
'erdaowan': 'RDX',
'erlian': 'RLC',
'erlong': 'RLD',
'erlongshantun': 'ELA',
'ermihe': 'RML',
'erying': 'RYJ',
'ezhou': 'ECN',
'ezhoudong': 'EFN',
'faer': 'FEM',
'fanchangxi': 'PUH',
'fangchenggangbei': 'FBZ',
'fanjiatun': 'FTT',
'fanshi': 'FSV',
'fanzhen': 'VZK',
'faqi': 'FQE',
'feidong': 'FIH',
'feixian': 'FXK',
'fengcheng': 'FCG',
'fengchengdong': 'FDT',
'fengchengnan': 'FNG',
'fengdu': 'FUW',
'fenghua': 'FHH',
'fenghuangcheng': 'FHT',
'fenghuangjichang': 'FJQ',
'fenglezhen': 'FZB',
'fenglingdu': 'FLV',
'fengshuicun': 'FSJ',
'fengshun': 'FUQ',
'fengtun': 'FTX',
'fengxian': 'FXY',
'fengyang': 'FUH',
'fengzhen': 'FZC',
'fengzhou': 'FZY',
'fenhe': 'FEV',
'fenyang': 'FAV',
'fenyi': 'FYG',
'foshan': 'FSQ',
'fuan': 'FAS',
'fuchuan': 'FDZ',
'fuding': 'FES',
'fuhai': 'FHR',
'fujin': 'FIB',
'fulaerji': 'FRX',
'fuling': 'FLW',
'fulingbei': 'FEW',
'fuliqu': 'FLJ',
'fulitun': 'FTB',
'funan': 'FNH',
'funing': 'FNP',
'fuqing': 'FQS',
'fuquan': 'VMW',
'fushankou': 'FKP',
'fushanzhen': 'FZQ',
'fushun': 'FST',
'fushunbei': 'FET',
'fusong': 'FSL',
'fusui': 'FSZ',
'futian': 'NZQ',
'futuyu': 'FYP',
'fuxian': 'FEY',
'fuxiandong': 'FDY',
'fuxinnan': 'FXD',
'fuyang': 'FYH',
'fuyu': 'FYT',
'fuyuan': 'FYB',
'fuyubei': 'FBT',
'fuzhou': 'FZG',
'fuzhoubei': 'FBG',
'fuzhoudong': 'FDG',
'fuzhounan': 'FYS',
'gaizhou': 'GXT',
'gaizhouxi': 'GAT',
'gancaodian': 'GDJ',
'gangou': 'GGL',
'gangu': 'GGJ',
'ganhe': 'GAX',
'ganluo': 'VOW',
'ganqika': 'GQD',
'ganquan': 'GQY',
'ganquanbei': 'GEY',
'ganshui': 'GSW',
'gantang': 'GNJ',
'ganzhou': 'GZG',
'gaoan': 'GCG',
'gaobeidian': 'GBP',
'gaobeidiandong': 'GMP',
'gaocheng': 'GEP',
'gaocun': 'GCV',
'gaogezhuang': 'GGP',
'gaolan': 'GEJ',
'gaoloufang': 'GFM',
'gaomi': 'GMK',
'gaoping': 'GPF',
'gaoqiaozhen': 'GZD',
'gaoshanzi': 'GSD',
'gaotai': 'GTJ',
'gaotainan': 'GAJ',
'gaotan': 'GAY',
'gaoyi': 'GIP',
'gaoyixi': 'GNP',
'gaozhou': 'GSQ',
'gashidianzi': 'GXD',
'gediannan': 'GNN',
'geermu': 'GRO',
'gegenmiao': 'GGT',
'geju': 'GEM',
'genhe': 'GEX',
'gezhenpu': 'GZT',
'gongcheng': 'GCZ',
'gongmiaozi': 'GMC',
'gongnonghu': 'GRT',
'gongpengzi': 'GPT',
'gongqingcheng': 'GAG',
'gongyi': 'GXF',
'gongyinan': 'GYF',
'gongyingzi': 'GYD',
'gongzhuling': 'GLT',
'gongzhulingnan': 'GBT',
'goubangzi': 'GBD',
'guan': 'GFP',
'guangan': 'VJW',
'guangannan': 'VUW',
'guangao': 'GVP',
'guangde': 'GRH',
'guanghan': 'GHW',
'guanghanbei': 'GVW',
'guangmingcheng': 'IMQ',
'guangnanwei': 'GNM',
'guangning': 'FBQ',
'guangningsi': 'GQT',
'guangningsinan': 'GNT',
'guangshan': 'GUN',
'guangshui': 'GSN',
'guangtongbei': 'GPM',
'guangyuan': 'GYW',
'guangyuannan': 'GAW',
'guangze': 'GZS',
'guangzhou': 'GZQ',
'guangzhoubei': 'GBQ',
'guangzhoudong': 'GGQ',
'guangzhounan': 'IZQ',
'guangzhouxi': 'GXQ',
'guanlin': 'GLF',
'guanling': 'GLE',
'guanshui': 'GST',
'guanting': 'GTP',
'guantingxi': 'KEP',
'guanzhaishan': 'GSS',
'guanzijing': 'GOT',
'guazhou': 'GZJ',
'gucheng': 'GCN',
'guchengzhen': 'GZB',
'gudong': 'GDV',
'guian': 'GAE',
'guiding': 'GTW',
'guidingbei': 'FMW',
'guidingnan': 'IDW',
'guidingxian': 'KIW',
'guigang': 'GGZ',
'guilin': 'GLZ',
'guilinbei': 'GBZ',
'guilinxi': 'GEZ',
'guiliuhe': 'GHT',
'guiping': 'GAZ',
'guixi': 'GXG',
'guiyang': 'GIW',
'guiyangbei': 'KQW',
'gujiao': 'GJV',
'gujiazi': 'GKT',
'gulang': 'GLJ',
'gulian': 'GRX',
'guojiadian': 'GDT',
'guoleizhuang': 'GLP',
'guosong': 'GSL',
'guoyang': 'GYH',
'guozhen': 'GZY',
'gushankou': 'GSP',
'gushi': 'GXN',
'gutian': 'GTS',
'gutianbei': 'GBS',
'gutianhuizhi': 'STS',
'guyuan': 'GUJ',
'guzhen': 'GEH',
'haerbin': 'HBB',
'haerbinbei': 'HTB',
'haerbindong': 'VBB',
'haerbinxi': 'VAB',
'haianxian': 'HIH',
'haibei': 'HEB',
'haicheng': 'HCT',
'haichengxi': 'HXT',
'haidongxi': 'HDO',
'haikou': 'VUQ',
'haikoudong': 'HMQ',
'hailaer': 'HRX',
'hailin': 'HRB',
'hailong': 'HIL',
'hailun': 'HLB',
'haining': 'HNH',
'hainingxi': 'EUH',
'haishiwan': 'HSO',
'haituozi': 'HZT',
'haiwan': 'RWH',
'haiyang': 'HYK',
'haiyangbei': 'HEK',
'halahai': 'HIT',
'halasu': 'HAX',
'hami': 'HMR',
'hancheng': 'HCY',
'hanchuan': 'HCN',
'hanconggou': 'HKB',
'handan': 'HDP',
'handandong': 'HPP',
'hanfuwan': 'HXJ',
'hangjinhouqi': 'HDC',
'hangu': 'HGP',
'hangzhou': 'HZH',
'hangzhoudong': 'HGH',
'hangzhounan': 'XHH',
'hanjiang': 'HJS',
'hankou': 'HKN',
'hanling': 'HAT',
'hanmaying': 'HYP',
'hanshou': 'VSQ',
'hanyin': 'HQY',
'hanyuan': 'WHW',
'hanzhong': 'HOY',
'haolianghe': 'HHB',
'hebei': 'HMB',
'hebi': 'HAF',
'hebian': 'HBV',
'hebidong': 'HFF',
'hechuan': 'WKW',
'hechun': 'HCZ',
'hefei': 'HFH',
'hefeibeicheng': 'COH',
'hefeinan': 'ENH',
'hefeixi': 'HTH',
'hegang': 'HGB',
'heichongtan': 'HCJ',
'heihe': 'HJB',
'heijing': 'HIM',
'heishui': 'HOT',
'heitai': 'HQB',
'heiwang': 'HWK',
'hejiadian': 'HJJ',
'hejianxi': 'HXP',
'hejin': 'HJV',
'hejing': 'HJR',
'hekoubei': 'HBM',
'hekounan': 'HKJ',
'heli': 'HOB',
'helong': 'HLL',
'hengdaohezi': 'HDB',
'hengfeng': 'HFG',
'henggouqiaodong': 'HNN',
'hengnan': 'HNG',
'hengshan': 'HSQ',
'hengshanxi': 'HEQ',
'hengshui': 'HSP',
'hengyang': 'HYQ',
'hengyangdong': 'HVQ',
'heping': 'VAQ',
'hepu': 'HVZ',
'heqing': 'HQM',
'heshengqiaodong': 'HLN',
'heshituoluogai': 'VSR',
'heshuo': 'VUR',
'hetian': 'VTR',
'heyang': 'HAY',
'heyangbei': 'HTY',
'heyuan': 'VIQ',
'heze': 'HIK',
'hezhou': 'HXZ',
'hongan': 'HWN',
'honganxi': 'VXN',
'hongguangzhen': 'IGW',
'hongguo': 'HEM',
'honghe': 'HPB',
'honghuagou': 'VHD',
'hongjiang': 'HFM',
'hongqing': 'HEY',
'hongshan': 'VSB',
'hongshaxian': 'VSJ',
'hongsipu': 'HSJ',
'hongtong': 'HDV',
'hongtongxi': 'HTV',
'hongxiantai': 'HTJ',
'hongxing': 'VXB',
'hongxinglong': 'VHB',
'hongyan': 'VIX',
'houhu': 'IHN',
'houma': 'HMV',
'houmaxi': 'HPV',
'houmen': 'KMQ',
'huacheng': 'VCQ',
'huade': 'HGC',
'huahu': 'KHN',
'huaian': 'AUH',
'huaiannan': 'AMH',
'huaibei': 'HRH',
'huaibin': 'HVN',
'huaihua': 'HHQ',
'huaihuanan': 'KAQ',
'huaiji': 'FAQ',
'huainan': 'HAH',
'huainandong': 'HOH',
'huairen': 'HRV',
'huairendong': 'HFV',
'huairou': 'HRP',
'huairoubei': 'HBP',
'huaiyin': 'IYN',
'huajia': 'HJT',
'huajiazhuang': 'HJM',
'hualin': 'HIB',
'huanan': 'HNB',
'huangbai': 'HBL',
'huangchuan': 'KCN',
'huangcun': 'HCP',
'huanggang': 'KGN',
'huanggangdong': 'KAN',
'huanggangxi': 'KXN',
'huangguayuan': 'HYM',
'huanggutun': 'HTT',
'huanghejingqu': 'HCF',
'huanghuatong': 'HUD',
'huangkou': 'KOH',
'huangling': 'ULY',
'huanglingnan': 'VLY',
'huangliu': 'KLQ',
'huangmei': 'VEH',
'huangnihe': 'HHL',
'huangshan': 'HKH',
'huangshanbei': 'NYH',
'huangshi': 'HSN',
'huangshibei': 'KSN',
'huangshidong': 'OSN',
'huangsongdian': 'HDL',
'huangyangtan': 'HGJ',
'huangyangzhen': 'HYJ',
'huangyuan': 'HNO',
'huangzhou': 'VON',
'huantai': 'VTK',
'huanxintian': 'VTB',
'huapengzi': 'HZM',
'huaqiao': 'VQH',
'huarong': 'HRN',
'huarongdong': 'HPN',
'huarongnan': 'KRN',
'huashan': 'HSY',
'huashanbei': 'HDY',
'huashannan': 'KNN',
'huaying': 'HUW',
'huayuan': 'HUN',
'huayuankou': 'HYT',
'huazhou': 'HZZ',
'huhehaote': 'HHC',
'huhehaotedong': 'NDC',
'huian': 'HNS',
'huichangbei': 'XEG',
'huidong': 'KDQ',
'huihuan': 'KHQ',
'huinong': 'HMJ',
'huishan': 'VCH',
'huitong': 'VTQ',
'huixian': 'HYY',
'huizhou': 'HCQ',
'huizhounan': 'KNQ',
'huizhouxi': 'VXQ',
'hukou': 'HKG',
'hulan': 'HUB',
'hulin': 'VLB',
'huludao': 'HLD',
'huludaobei': 'HPD',
'hulusitai': 'VTJ',
'humen': 'IUQ',
'hunchun': 'HUL',
'hunhe': 'HHT',
'huoerguosi': 'HFR',
'huojia': 'HJF',
'huolianzhai': 'HLT',
'huolinguole': 'HWD',
'huoqiu': 'FBH',
'huozhou': 'HZV',
'huozhoudong': 'HWV',
'hushiha': 'HHP',
'hushitai': 'HUT',
'huzhou': 'VZH',
'jiafeng': 'JFF',
'jiagedaqi': 'JGX',
'jialuhe': 'JLF',
'jiamusi': 'JMB',
'jian': 'JAL',
'jianchang': 'JFD',
'jianfeng': 'PFQ',
'jiangbiancun': 'JBG',
'jiangdu': 'UDH',
'jianghua': 'JHZ',
'jiangjia': 'JJB',
'jiangjin': 'JJW',
'jiangle': 'JLS',
'jiangmen': 'JWQ',
'jiangning': 'JJH',
'jiangningxi': 'OKH',
'jiangqiao': 'JQX',
'jiangshan': 'JUH',
'jiangsuotian': 'JOM',
'jiangyan': 'UEH',
'jiangyong': 'JYZ',
'jiangyou': 'JFW',
'jiangyuan': 'SZL',
'jianhu': 'AJH',
'jianningxianbei': 'JCS',
'jianou': 'JVS',
'jianouxi': 'JUS',
'jiansanjiang': 'JIB',
'jianshe': 'JET',
'jianshi': 'JRN',
'jianshui': 'JSM',
'jianyang': 'JYS',
'jianyangnan': 'JOW',
'jiaocheng': 'JNV',
'jiaohe': 'JHL',
'jiaohexi': 'JOL',
'jiaomei': 'JES',
'jiaozhou': 'JXK',
'jiaozhoubei': 'JZK',
'jiaozuo': 'JOF',
'jiaozuodong': 'WEF',
'jiashan': 'JSH',
'jiashannan': 'EAH',
'jiaxiang': 'JUK',
'jiaxing': 'JXH',
'jiaxingnan': 'EPH',
'jiaxinzi': 'JXT',
'jiayuguan': 'JGJ',
'jiayuguannan': 'JBJ',
'jidong': 'JOB',
'jieshoushi': 'JUN',
'jiexiu': 'JXV',
'jiexiudong': 'JDV',
'jieyang': 'JRQ',
'jiguanshan': 'JST',
'jijiagou': 'VJD',
'jilin': 'JLL',
'jiling': 'JLJ',
'jimobei': 'JVK',
'jinan': 'JNK',
'jinandong': 'JAK',
'jinanxi': 'JGK',
'jinbaotun': 'JBD',
'jinchang': 'JCJ',
'jincheng': 'JCF',
'jinchengbei': 'JEF',
'jinchengjiang': 'JJZ',
'jingbian': 'JIY',
'jingchuan': 'JAJ',
'jingde': 'NSH',
'jingdezhen': 'JCG',
'jingdian': 'JFP',
'jinggangshan': 'JGG',
'jinghai': 'JHP',
'jinghe': 'JHR',
'jinghenan': 'JIR',
'jingmen': 'JMN',
'jingnan': 'JNP',
'jingoutun': 'VGP',
'jingpeng': 'JPC',
'jingshan': 'JCN',
'jingtai': 'JTJ',
'jingtieshan': 'JVJ',
'jingxi': 'JMZ',
'jingxian': 'LOH',
'jingxing': 'JJP',
'jingyu': 'JYL',
'jingyuan': 'JYJ',
'jingyuanxi': 'JXJ',
'jingzhou': 'JBN',
'jinhe': 'JHX',
'jinhua': 'JBH',
'jinhuanan': 'RNH',
'jining': 'JIK',
'jiningnan': 'JAC',
'jinjiang': 'JJS',
'jinkeng': 'JKT',
'jinmacun': 'JMM',
'jinshanbei': 'EGH',
'jinshantun': 'JTB',
'jinxian': 'JUG',
'jinxiannan': 'JXG',
'jinyintan': 'JTN',
'jinyuewan': 'PYQ',
'jinyun': 'JYH',
'jinyunxi': 'PYH',
'jinzhai': 'JZH',
'jinzhangzi': 'JYD',
'jinzhong': 'JZV',
'jinzhou': 'JZD',
'jinzhounan': 'JOD',
'jishan': 'JVV',
'jishou': 'JIQ',
'jishu': 'JSL',
'jiujiang': 'JJG',
'jiuquan': 'JQJ',
'jiuquannan': 'JNJ',
'jiusan': 'SSX',
'jiutai': 'JTL',
'jiutainan': 'JNL',
'jiuzhuangwo': 'JVP',
'jiwen': 'JWX',
'jixi': 'JXB',
'jixian': 'JKP',
'jixibei': 'NRH',
'jixixian': 'JRH',
'jiyuan': 'JYF',
'juancheng': 'JCK',
'jubao': 'JRT',
'junan': 'JOK',
'junde': 'JDB',
'junliangchengbei': 'JMP',
'jurongxi': 'JWH',
'juxian': 'JKK',
'juye': 'JYK',
'kaian': 'KAT',
'kaifeng': 'KFF',
'kaifengbei': 'KBF',
'kaijiang': 'KAW',
'kaili': 'KLW',
'kailinan': 'QKW',
'kailu': 'KLC',
'kaitong': 'KTT',
'kaiyang': 'KVW',
'kaiyuan': 'KYT',
'kaiyuanxi': 'KXT',
'kalaqi': 'KQX',
'kangcheng': 'KCP',
'kangjinjing': 'KJB',
'kangxiling': 'KXZ',
'kangzhuang': 'KZP',
'kashi': 'KSR',
'kedong': 'KOB',
'kelamayi': 'KHR',
'kelan': 'KLV',
'keshan': 'KSB',
'keyihe': 'KHX',
'kouqian': 'KQL',
'kuandian': 'KDT',
'kuche': 'KCR',
'kuduer': 'KDX',
'kuerle': 'KLR',
'kuishan': 'KAB',
'kuitan': 'KTQ',
'kuitun': 'KTR',
'kulun': 'KLD',
'kundulunzhao': 'KDC',
'kunming': 'KMM',
'kunmingxi': 'KXM',
'kunshan': 'KSH',
'kunshannan': 'KNH',
'kunyang': 'KAM',
'lagu': 'LGB',
'laha': 'LHX',
'laibin': 'UBZ',
'laibinbei': 'UCZ',
'laituan': 'LVZ',
'laiwudong': 'LWK',
'laiwuxi': 'UXK',
'laixi': 'LXK',
'laixibei': 'LBK',
'laiyang': 'LYK',
'laiyuan': 'LYP',
'laizhou': 'LZS',
'lalin': 'LAB',
'lamadian': 'LMX',
'lancun': 'LCK',
'langang': 'LNB',
'langfang': 'LJP',
'langfangbei': 'LFP',
'langweishan': 'LRJ',
'langxiang': 'LXB',
'langzhong': 'LZE',
'lankao': 'LKF',
'lankaonan': 'LUF',
'lanling': 'LLB',
'lanlingbei': 'COK',
'lanxi': 'LWH',
'lanzhou': 'LZJ',
'lanzhoudong': 'LVJ',
'lanzhouxi': 'LAJ',
'lanzhouxinqu': 'LQJ',
'laobian': 'LLT',
'laochengzhen': 'ACQ',
'laofu': 'UFD',
'laolai': 'LAX',
'laoying': 'LXL',
'lasa': 'LSO',
'lazha': 'LEM',
'lechang': 'LCQ',
'ledong': 'UQQ',
'ledu': 'LDO',
'ledunan': 'LVO',
'leiyang': 'LYQ',
'leiyangxi': 'LPQ',
'leizhou': 'UAQ',
'lengshuijiangdong': 'UDQ',
'lepingshi': 'LPG',
'leshan': 'IVW',
'leshanbei': 'UTW',
'leshancun': 'LUM',
'liangdang': 'LDY',
'liangdixia': 'LDP',
'lianggezhuang': 'LGP',
'liangjia': 'UJT',
'liangjiadian': 'LRT',
'liangping': 'UQW',
'liangpingnan': 'LPE',
'liangshan': 'LMK',
'lianjiang': 'LJZ',
'lianjiangkou': 'LHB',
'lianshanguan': 'LGT',
'lianyuan': 'LAQ',
'lianyungang': 'UIH',
'lianyungangdong': 'UKH',
'liaocheng': 'UCK',
'liaoyang': 'LYT',
'liaoyuan': 'LYL',
'liaozhong': 'LZD',
'licheng': 'UCP',
'lichuan': 'LCN',
'liduigongyuan': 'INW',
'lijia': 'LJB',
'lijiang': 'LHM',
'lijiaping': 'LIJ',
'lijinnan': 'LNK',
'lilinbei': 'KBQ',
'liling': 'LLG',
'lilingdong': 'UKQ',
'limudian': 'LMB',
'lincheng': 'UUP',
'linchuan': 'LCG',
'lindong': 'LRC',
'linfen': 'LFV',
'linfenxi': 'LXV',
'lingaonan': 'KGQ',
'lingbao': 'LBF',
'lingbaoxi': 'LPF',
'lingbi': 'GMH',
'lingcheng': 'LGK',
'linghai': 'JID',
'lingling': 'UWZ',
'lingqiu': 'LVV',
'lingshi': 'LSV',
'lingshidong': 'UDV',
'lingshui': 'LIQ',
'lingwu': 'LNJ',
'lingyuan': 'LYD',
'lingyuandong': 'LDD',
'linhai': 'LXX',
'linhe': 'LHC',
'linjialou': 'ULK',
'linjiang': 'LQL',
'linkou': 'LKB',
'linli': 'LWQ',
'linqing': 'UQK',
'linshengpu': 'LBT',
'linxi': 'LXC',
'linxiang': 'LXQ',
'linyi': 'LUK',
'linyibei': 'UYK',
'linying': 'LNF',
'linyuan': 'LYX',
'linze': 'LEJ',
'linzenan': 'LDJ',
'liquan': 'LGY',
'lishizhai': 'LET',
'lishui': 'USH',
'lishuzhen': 'LSB',
'litang': 'LTZ',
'liudaohezi': 'LVP',
'liuhe': 'LNL',
'liuhezhen': 'LEX',
'liujiadian': 'UDT',
'liujiahe': 'LVT',
'liulinnan': 'LKV',
'liupanshan': 'UPJ',
'liupanshui': 'UMW',
'liushuigou': 'USP',
'liushutun': 'LSD',
'liuyuan': 'DHR',
'liuyuannan': 'LNR',
'liuzhi': 'LIW',
'liuzhou': 'LZZ',
'liwang': 'VLJ',
'lixian': 'LEQ',
'liyang': 'LEH',
'lizhi': 'LZX',
'longandong': 'IDZ',
'longchang': 'LCW',
'longchangbei': 'NWW',
'longchuan': 'LUQ',
'longdongbao': 'FVW',
'longfeng': 'KFQ',
'longgou': 'LGJ',
'longgudian': 'LGM',
'longhua': 'LHP',
'longjia': 'UJL',
'longjiang': 'LJX',
'longjing': 'LJL',
'longli': 'LLW',
'longlibei': 'KFW',
'longnan': 'UNG',
'longquansi': 'UQJ',
'longshanzhen': 'LAS',
'longshi': 'LAG',
'longtangba': 'LBM',
'longxi': 'LXJ',
'longxian': 'LXY',
'longyan': 'LYS',
'longyou': 'LMH',
'longzhen': 'LZA',
'longzhuagou': 'LZT',
'loudi': 'LDQ',
'loudinan': 'UOQ',
'luan': 'UAH',
'luanhe': 'UDP',
'luanheyan': 'UNP',
'luanping': 'UPP',
'luanxian': 'UXP',
'luchaogang': 'UCH',
'lucheng': 'UTP',
'luchuan': 'LKZ',
'ludao': 'LDL',
'lueyang': 'LYY',
'lufan': 'LVM',
'lufeng': 'LLQ',
'lufengnan': 'LQM',
'lugou': 'LOM',
'lujiang': 'UJH',
'lukoupu': 'LKQ',
'luliang': 'LRM',
'lulong': 'UAP',
'luntai': 'LAR',
'luocheng': 'VCZ',
'luofa': 'LOP',
'luohe': 'LON',
'luohexi': 'LBN',
'luojiang': 'LJW',
'luojiangdong': 'IKW',
'luomen': 'LMJ',
'luoping': 'LPM',
'luopoling': 'LPP',
'luoshan': 'LRN',
'luotuoxiang': 'LTJ',
'luowansanjiang': 'KRW',
'luoyang': 'LYF',
'luoyangdong': 'LDF',
'luoyanglongmen': 'LLF',
'luoyuan': 'LVS',
'lushan': 'LAF',
'lushuihe': 'LUL',
'lutai': 'LTP',
'luxi': 'LUG',
'luzhai': 'LIZ',
'luzhaibei': 'LSZ',
'lvboyuan': 'LCF',
'lvhua': 'LWJ',
'lvliang': 'LHV',
'lvshun': 'LST',
'maanshan': 'MAH',
'maanshandong': 'OMH',
'macheng': 'MCN',
'machengbei': 'MBN',
'mahuang': 'MHZ',
'maiyuan': 'MYS',
'malan': 'MLR',
'malianhe': 'MHB',
'malin': 'MID',
'malong': 'MGM',
'manasi': 'MSR',
'manasihu': 'MNR',
'mangui': 'MHX',
'manshuiwan': 'MKW',
'manzhouli': 'MLX',
'maoba': 'MBY',
'maobaguan': 'MGY',
'maocaoping': 'KPM',
'maochen': 'MHN',
'maoershan': 'MRB',
'maolin': 'MLD',
'maoling': 'MLZ',
'maoming': 'MDQ',
'maomingxi': 'MMZ',
'maoshezu': 'MOM',
'maqiaohe': 'MQB',
'masanjia': 'MJT',
'mashan': 'MAB',
'mawei': 'VAW',
'mayang': 'MVQ',
'meihekou': 'MHL',
'meilan': 'MHQ',
'meishan': 'MSW',
'meishandong': 'IUW',
'meixi': 'MEB',
'meizhou': 'MOQ',
'mengdonghe': 'MUQ',
'mengjiagang': 'MGB',
'mengzhuang': 'MZF',
'mengzi': 'MZM',
'mengzibei': 'MBM',
'menyuan': 'MYO',
'mianchi': 'MCF',
'mianchinan': 'MNF',
'mianduhe': 'MDX',
'mianning': 'UGW',
'mianxian': 'MVY',
'mianyang': 'MYW',
'miaocheng': 'MAP',
'miaoling': 'MLL',
'miaoshan': 'MSN',
'miaozhuang': 'MZJ',
'midu': 'MDF',
'miluo': 'MLQ',
'miluodong': 'MQQ',
'mingcheng': 'MCL',
'minggang': 'MGN',
'minggangdong': 'MDN',
'mingguang': 'MGH',
'mingshuihe': 'MUT',
'mingzhu': 'MFQ',
'minhenan': 'MNO',
'minji': 'MJN',
'minle': 'MBJ',
'minqing': 'MQS',
'minqingbei': 'MBS',
'minquan': 'MQF',
'minquanbei': 'MIF',
'mishan': 'MSB',
'mishazi': 'MST',
'miyi': 'MMW',
'miyunbei': 'MUP',
'mizhi': 'MEY',
'modaoshi': 'MOB',
'moerdaoga': 'MRX',
'mohe': 'MVX',
'moyu': 'MUR',
'mudanjiang': 'MDB',
'muling': 'MLB',
'mulitu': 'MUD',
'mupang': 'MPQ',
'muping': 'MBK',
'nailin': 'NLD',
'naiman': 'NMD',
'naluo': 'ULZ',
'nanboshan': 'NBK',
'nanbu': 'NBE',
'nancao': 'NEF',
'nancha': 'NCB',
'nanchang': 'NCG',
'nanchangxi': 'NXG',
'nancheng': 'NDG',
'nanchengsi': 'NSP',
'nanchong': 'NCW',
'nanchongbei': 'NCE',
'nandamiao': 'NMP',
'nandan': 'NDZ',
'nanfen': 'NFT',
'nanfenbei': 'NUT',
'nanfeng': 'NFG',
'nangongdong': 'NFP',
'nanguancun': 'NGP',
'nanguanling': 'NLT',
'nanhechuan': 'NHJ',
'nanhua': 'NHS',
'nanhudong': 'NDN',
'nanjiang': 'FIW',
'nanjiangkou': 'NDQ',
'nanjing': 'NJH',
'nanjingnan': 'NKH',
'nankou': 'NKP',
'nankouqian': 'NKT',
'nanlang': 'NNQ',
'nanling': 'LLH',
'nanmu': 'NMX',
'nanning': 'NNZ',
'nanningdong': 'NFZ',
'nanningxi': 'NXZ',
'nanping': 'NPS',
'nanpingbei': 'NBS',
'nanpingnan': 'NNS',
'nanqiao': 'NQD',
'nanqiu': 'NCK',
'nantai': 'NTT',
'nantong': 'NUH',
'nantou': 'NOQ',
'nanwanzi': 'NWP',
'nanxiangbei': 'NEH',
'nanxiong': 'NCQ',
'nanyang': 'NFF',
'nanyangzhai': 'NYF',
'nanyu': 'NUP',
'nanzamu': 'NZT',
'nanzhao': 'NAF',
'napu': 'NPZ',
'naqu': 'NQO',
'nayong': 'NYE',
'nehe': 'NHX',
'neijiang': 'NJW',
'neijiangbei': 'NKW',
'neixiang': 'NXF',
'nengjia': 'NJD',
'nenjiang': 'NGX',
'niangziguan': 'NIP',
'nianzishan': 'NZX',
'nihezi': 'NHD',
'nileke': 'NIR',
'nimu': 'NMO',
'ningan': 'NAB',
'ningbo': 'NGH',
'ningbodong': 'NVH',
'ningcun': 'NCZ',
'ningde': 'NES',
'ningdong': 'NOJ',
'ningdongnan': 'NDJ',
'ningguo': 'NNH',
'ninghai': 'NHH',
'ningjia': 'NVT',
'ninglingxian': 'NLF',
'ningming': 'NMZ',
'ningwu': 'NWV',
'ningxiang': 'NXQ',
'niujia': 'NJB',
'niuxintai': 'NXT',
'nongan': 'NAT',
'nuanquan': 'NQJ',
'paihuaibei': 'PHP',
'pananzhen': 'PAJ',
'panguan': 'PAM',
'panjiadian': 'PDP',
'panjin': 'PVD',
'panjinbei': 'PBD',
'panlongcheng': 'PNN',
'panshi': 'PSL',
'panzhihua': 'PRW',
'panzhou': 'PAE',
'paozi': 'POD',
'peide': 'PDB',
'pengan': 'PAW',
'pengshan': 'PSW',
'pengshanbei': 'PPW',
'pengshui': 'PHW',
'pengyang': 'PYJ',
'pengze': 'PZG',
'pengzhou': 'PMW',
'piandian': 'PRP',
'pianling': 'PNT',
'piaoertun': 'PRT',
'pikou': 'PUT',
'pikounan': 'PKT',
'pingan': 'PAL',
'pinganyi': 'PNO',
'pinganzhen': 'PZT',
'pingbanan': 'PBE',
'pingbian': 'PBM',
'pingchang': 'PCE',
'pingdingshan': 'PEN',
'pingdingshanxi': 'BFF',
'pingdu': 'PAK',
'pingfang': 'PFB',
'pinggang': 'PGL',
'pingguan': 'PGM',
'pingguo': 'PGZ',
'pinghekou': 'PHM',
'pinghu': 'PHQ',
'pingliang': 'PIJ',
'pingliangnan': 'POJ',
'pingnannan': 'PAZ',
'pingquan': 'PQP',
'pingshan': 'PSB',
'pingshang': 'PSK',
'pingshe': 'PSV',
'pingshi': 'PSQ',
'pingtai': 'PVT',
'pingtian': 'PTM',
'pingwang': 'PWV',
'pingxiang': 'PXG',
'pingxiangbei': 'PBG',
'pingxingguan': 'PGV',
'pingyang': 'PYX',
'pingyao': 'PYV',
'pingyaogucheng': 'PDV',
'pingyi': 'PIK',
'pingyu': 'PYP',
'pingyuan': 'PYK',
'pingyuanpu': 'PPJ',
'pingzhuang': 'PZD',
'pingzhuangnan': 'PND',
'pishan': 'PSR',
'pixian': 'PWW',
'pixianxi': 'PCW',
'pizhou': 'PJH',
'podixia': 'PXJ',
'puan': 'PAN',
'puanxian': 'PUE',
'pucheng': 'PCY',
'puchengdong': 'PEY',
'puding': 'PGW',
'pulandian': 'PLT',
'puning': 'PEQ',
'putaojing': 'PTW',
'putian': 'PTS',
'puwan': 'PWT',
'puxiong': 'POW',
'puyang': 'PYF',
'qianan': 'QQP',
'qianfeng': 'QFB',
'qianhe': 'QUY',
'qianjiang': 'QJN',
'qianjinzhen': 'QEB',
'qianmotou': 'QMP',
'qianshan': 'QXQ',
'qianwei': 'QWD',
'qianweitang': 'QWP',
'qianxian': 'QBY',
'qianyang': 'QOY',
'qiaotou': 'QAT',
'qiaoxi': 'QXJ',
'qichun': 'QRN',
'qidian': 'QDM',
'qidong': 'QMQ',
'qidongbei': 'QRQ',
'qifengta': 'QVP',
'qijiang': 'QJW',
'qijiapu': 'QBT',
'qilihe': 'QLD',
'qimen': 'QIH',
'qingan': 'QAB',
'qingbaijiangdong': 'QFW',
'qingchengshan': 'QSW',
'qingdao': 'QDK',
'qingdaobei': 'QHK',
'qingdui': 'QET',
'qingfeng': 'QFT',
'qinghe': 'QIP',
'qinghecheng': 'QYP',
'qinghemen': 'QHD',
'qinghuayuan': 'QHP',
'qingjianxian': 'QNY',
'qinglian': 'QEW',
'qinglong': 'QIB',
'qinglongshan': 'QGH',
'qingshan': 'QSB',
'qingshen': 'QVW',
'qingsheng': 'QSQ',
'qingshui': 'QUJ',
'qingshuibei': 'QEJ',
'qingtian': 'QVH',
'qingtongxia': 'QTJ',
'qingxian': 'QXP',
'qingxu': 'QUV',
'qingyangshan': 'QSJ',
'qingyuan': 'QBQ',
'qingzhoushi': 'QZK',
'qinhuangdao': 'QTP',
'qinjia': 'QJB',
'qinjiazhuang': 'QZV',
'qinling': 'QLY',
'qinxian': 'QVV',
'qinyang': 'QYF',
'qinzhou': 'QRZ',
'qinzhoudong': 'QDZ',
'qionghai': 'QYQ',
'qiqihaer': 'QHX',
'qiqihaernan': 'QNB',
'qishan': 'QAY',
'qishuyan': 'QYH',
'qitaihe': 'QTB',
'qixian': 'QXV',
'qixiandong': 'QGV',
'qixiaying': 'QXC',
'qiyang': 'QWQ',
'qiyangbei': 'QVQ',
'qiying': 'QYJ',
'qiziwan': 'QZQ',
'quanjiao': 'INH',
'quanyang': 'QYL',
'quanzhou': 'QYS',
'quanzhoudong': 'QRS',
'quanzhounan': 'QNZ',
'queshan': 'QSN',
'qufu': 'QFK',
'qufudong': 'QAK',
'qujiang': 'QIM',
'qujing': 'QJM',
'qujiu': 'QJZ',
'quli': 'QLZ',
'qushuixian': 'QSO',
'quxian': 'QRW',
'quzhou': 'QEH',
'raoping': 'RVQ',
'raoyang': 'RVP',
'raoyanghe': 'RHD',
'renbu': 'RUO',
'renqiu': 'RQP',
'reshui': 'RSD',
'rikaze': 'RKO',
'rizhao': 'RZK',
'rongan': 'RAZ',
'rongchang': 'RCW',
'rongchangbei': 'RQW',
'rongcheng': 'RCK',
'ronggui': 'RUQ',
'rongjiang': 'RVW',
'rongshui': 'RSZ',
'rongxian': 'RXZ',
'rudong': 'RIH',
'rugao': 'RBH',
'ruian': 'RAH',
'ruichang': 'RCG',
'ruijin': 'RJG',
'rujigou': 'RQJ',
'rushan': 'ROK',
'ruyang': 'RYF',
'ruzhou': 'ROF',
'saihantala': 'SHC',
'salaqi': 'SLC',
'sandaohu': 'SDL',
'sanduxian': 'KKW',
'sanggendalai': 'OGC',
'sanguankou': 'OKJ',
'sangyuanzi': 'SAJ',
'sanhexian': 'OXP',
'sanhezhuang': 'SVP',
'sanhuizhen': 'OZW',
'sanjiadian': 'ODP',
'sanjianfang': 'SFX',
'sanjiangkou': 'SKD',
'sanjiangnan': 'SWZ',
'sanjiangxian': 'SOZ',
'sanjiazhai': 'SMM',
'sanjingzi': 'OJT',
'sanmenxia': 'SMF',
'sanmenxian': 'OQH',
'sanmenxianan': 'SCF',
'sanmenxiaxi': 'SXF',
'sanming': 'SMS',
'sanmingbei': 'SHS',
'sanshijia': 'SRD',
'sanshilipu': 'SST',
'sanshui': 'SJQ',
'sanshuibei': 'ARQ',
'sanshuinan': 'RNQ',
'sansui': 'QHW',
'santangji': 'SDH',
'sanya': 'SEQ',
'sanyangchuan': 'SYJ',
'sanyijing': 'OYD',
'sanying': 'OEJ',
'sanyuan': 'SAY',
'sanyuanpu': 'SYL',
'shache': 'SCR',
'shacheng': 'SCP',
'shahai': 'SED',
'shahe': 'SHP',
'shahekou': 'SKT',
'shaheshi': 'VOP',
'shahousuo': 'SSD',
'shalingzi': 'SLP',
'shanchengzhen': 'SCL',
'shandan': 'SDJ',
'shangbancheng': 'SBP',
'shangbanchengnan': 'OBP',
'shangcheng': 'SWN',
'shangdu': 'SXC',
'shanggaozhen': 'SVK',
'shanghai': 'SHH',
'shanghaihongqiao': 'AOH',
'shanghainan': 'SNH',
'shanghaixi': 'SXH',
'shanghang': 'JBS',
'shanghe': 'SOK',
'shangjia': 'SJB',
'shangluo': 'OLY',
'shangnan': 'ONY',
'shangqiu': 'SQF',
'shangqiunan': 'SPF',
'shangrao': 'SRG',
'shangwan': 'SWP',
'shangxipu': 'SXM',
'shangyaodun': 'SPJ',
'shangyu': 'BDH',
'shangyuan': 'SUD',
'shangzhi': 'SZB',
'shanhaiguan': 'SHD',
'shanhetun': 'SHL',
'shanpodong': 'SBN',
'shanshan': 'SSR',
'shanshanbei': 'SMR',
'shanshi': 'SQB',
'shantou': 'OTQ',
'shanwei': 'OGQ',
'shanyin': 'SNV',
'shaodong': 'FIQ',
'shaoguan': 'SNQ',
'shaoguandong': 'SGQ',
'shaojiatang': 'SJJ',
'shaoshan': 'SSQ',
'shaoshannan': 'INQ',
'shaowu': 'SWS',
'shaoxing': 'SOH',
'shaoxingbei': 'SLH',
'shaoxingdong': 'SSH',
'shaoyang': 'SYQ',
'shaoyangbei': 'OVQ',
'shapotou': 'SFJ',
'shaqiao': 'SQM',
'shatuo': 'SFM',
'shawanxian': 'SXR',
'shaxian': 'SAS',
'shelihu': 'VLD',
'shenchi': 'SMV',
'shenfang': 'OLH',
'shengfang': 'SUP',
'shenjia': 'OJB',
'shenjiahe': 'OJJ',
'shenjingzi': 'SWT',
'shenmu': 'OMY',
'shenqiu': 'SQN',
'shenshu': 'SWB',
'shentou': 'SEV',
'shenyang': 'SYT',
'shenyangbei': 'SBT',
'shenyangdong': 'SDT',
'shenyangnan': 'SOT',
'shenzhen': 'SZQ',
'shenzhenbei': 'IOQ',
'shenzhendong': 'BJQ',
'shenzhenpingshan': 'IFQ',
'shenzhenxi': 'OSQ',
'shenzhou': 'OZP',
'shexian': 'OVH',
'shexianbei': 'NPH',
'shiba': 'OBJ',
'shibing': 'AQW',
'shiboyuan': 'ZWT',
'shicheng': 'SCT',
'shidu': 'SEP',
'shihezi': 'SZR',
'shijiazhuang': 'SJP',
'shijiazhuangbei': 'VVP',
'shijiazi': 'SJD',
'shijiazui': 'SHM',
'shijingshannan': 'SRP',
'shilidian': 'OMP',
'shilin': 'SPB',
'shiling': 'SOL',
'shilinnan': 'LNM',
'shilong': 'SLQ',
'shimenxian': 'OMQ',
'shimenxianbei': 'VFQ',
'shiqiao': 'SQE',
'shiqiaozi': 'SQT',
'shiquanxian': 'SXY',
'shiren': 'SRL',
'shirencheng': 'SRB',
'shishan': 'KSQ',
'shishanbei': 'NSQ',
'shiti': 'STE',
'shitou': 'OTB',
'shixian': 'SXL',
'shixiazi': 'SXJ',
'shixing': 'IPQ',
'shiyan': 'SNN',
'shizhuang': 'SNM',
'shizhuxian': 'OSW',
'shizong': 'SEM',
'shizuishan': 'QQJ',
'shoushan': 'SAT',
'shouyang': 'SYV',
'shuangchengbei': 'SBB',
'shuangchengpu': 'SCB',
'shuangfeng': 'OFB',
'shuangfengbei': 'NFQ',
'shuanghezhen': 'SEL',
'shuangji': 'SML',
'shuangliao': 'ZJD',
'shuangliujichang': 'IPW',
'shuangliuxi': 'IQW',
'shuangpai': 'SBZ',
'shuangyashan': 'SSB',
'shucheng': 'OCH',
'shuidong': 'SIL',
'shuifu': 'OTW',
'shuijiahu': 'SQH',
'shuiquan': 'SID',
'shuiyang': 'OYP',
'shuiyuan': 'OYJ',
'shulan': 'SLL',
'shule': 'SUR',
'shulehe': 'SHJ',
'shunchang': 'SCS',
'shunde': 'ORQ',
'shundexueyuan': 'OJQ',
'shunyi': 'SOP',
'shuozhou': 'SUV',
'shuyang': 'FMH',
'sidaowan': 'OUD',
'sifangtai': 'STB',
'siheyong': 'OHD',
'sihong': 'GQH',
'sihui': 'AHQ',
'sijialing': 'OLK',
'siping': 'SPT',
'sipingdong': 'PPT',
'sishui': 'OSK',
'sixian': 'GPH',
'siyang': 'MPH',
'song': 'SOB',
'songchenglu': 'SFF',
'songhe': 'SBM',
'songjiang': 'SAH',
'songjianghe': 'SJL',
'songjiangnan': 'IMH',
'songjiangzhen': 'OZL',
'songshu': 'SFT',
'songshuzhen': 'SSL',
'songtao': 'MZQ',
'songyuan': 'VYT',
'songyuanbei': 'OCT',
'songzi': 'SIN',
'suide': 'ODY',
'suifenhe': 'SFB',
'suihua': 'SHB',
'suiling': 'SIB',
'suining': 'NIW',
'suiping': 'SON',
'suixi': 'SXZ',
'suiyang': 'SYB',
'suizhong': 'SZD',
'suizhongbei': 'SND',
'suizhou': 'SZN',
'sujiatun': 'SXT',
'suning': 'SYP',
'sunitezuoqi': 'ONC',
'sunjia': 'SUB',
'sunwu': 'SKB',
'sunzhen': 'OZY',
'suolun': 'SNT',
'suotuhan': 'SHX',
'susong': 'OAH',
'suzhou': 'SZH',
'suzhoubei': 'OHH',
'suzhoudong': 'SRH',
'suzhouxinqu': 'ITH',
'suzhouyuanqu': 'KAH',
'taerqi': 'TVX',
'taha': 'THX',
'tahe': 'TXX',
'taian': 'TID',
'taigu': 'TGV',
'taiguxi': 'TIV',
'taihe': 'THG',
'taihu': 'TKH',
'taikang': 'TKX',
'tailai': 'TLX',
'taimushan': 'TLS',
'taining': 'TNS',
'taipingchuan': 'TIT',
'taipingzhen': 'TEB',
'taiqian': 'TTK',
'taishan': 'TAK',
'taiyangshan': 'TYJ',
'taiyangsheng': 'TQT',
'taiyuan': 'TYV',
'taiyuanbei': 'TBV',
'taiyuandong': 'TDV',
'taiyuannan': 'TNV',
'taizhou': 'TZH',
'tancheng': 'TZK',
'tangbao': 'TBQ',
'tangchi': 'TCX',
'tanggu': 'TGP',
'tanghai': 'THM',
'tanghe': 'THF',
'tangjiawan': 'PDQ',
'tangshan': 'TSP',
'tangshanbei': 'FUP',
'tangshancheng': 'TCT',
'tangwanghe': 'THB',
'tangxunhu': 'THN',
'tangyin': 'TYF',
'tangyuan': 'TYB',
'tanjiajing': 'TNJ',
'taocun': 'TCK',
'taocunbei': 'TOK',
'taojiatun': 'TOT',
'taolaizhao': 'TPT',
'taonan': 'TVT',
'taoshan': 'TAB',
'tashizui': 'TIM',
'tayayi': 'TYP',
'tengxian': 'TAZ',
'tengzhou': 'TXK',
'tengzhoudong': 'TEK',
'tiandong': 'TDZ',
'tiandongbei': 'TBZ',
'tiangang': 'TGL',
'tianhejichang': 'TJN',
'tianhejie': 'TEN',
'tianjin': 'TJP',
'tianjinbei': 'TBP',
'tianjinnan': 'TIP',
'tianjinxi': 'TXP',
'tianlin': 'TFZ',
'tianmen': 'TMN',
'tianmennan': 'TNN',
'tianqiaoling': 'TQL',
'tianshifu': 'TFT',
'tianshui': 'TSJ',
'tianyang': 'TRZ',
'tianyi': 'TND',
'tianzhen': 'TZV',
'tianzhu': 'TZJ',
'tianzhushan': 'QWH',
'tiechang': 'TCL',
'tieli': 'TLB',
'tieling': 'TLT',
'tielingxi': 'PXT',
'tingliang': 'TIZ',
'tonganyi': 'TAJ',
'tongbai': 'TBF',
'tongbei': 'TBB',
'tongcheng': 'TTH',
'tongdao': 'TRQ',
'tonggou': 'TOL',
'tongguan': 'TGY',
'tonghai': 'TAM',
'tonghua': 'THL',
'tonghuaxian': 'TXL',
'tongjiang': 'TJB',
'tongjunzhuang': 'TZP',
'tongliao': 'TLD',
'tongling': 'TJH',
'tonglingbei': 'KXH',
'tongnan': 'TVW',
'tongren': 'RDQ',
'tongrennan': 'TNW',
'tongtu': 'TUT',
'tongxiang': 'TCH',
'tongxin': 'TXJ',
'tongyuanpu': 'TYT',
'tongyuanpuxi': 'TST',
'tongzhouxi': 'TAP',
'tongzi': 'TZW',
'tongzilin': 'TEW',
'tuanjie': 'TIX',
'tuditangdong': 'TTN',
'tuguiwula': 'TGC',
'tuha': 'THR',
'tuliemaodu': 'TMD',
'tulihe': 'TEX',
'tulufan': 'TFR',
'tulufanbei': 'TAR',
'tumen': 'TML',
'tumenbei': 'QSL',
'tumenzi': 'TCJ',
'tumuertai': 'TRC',
'tuoyaoling': 'TIL',
'tuqiang': 'TQX',
'tuqiaozi': 'TQJ',
'tuxi': 'TSW',
'wafangdian': 'WDT',
'wafangdianxi': 'WXT',
'waitoushan': 'WIT',
'walagan': 'WVX',
'wanfatun': 'WFB',
'wanganzhen': 'WVP',
'wangcang': 'WEW',
'wangdu': 'WDP',
'wangfu': 'WUT',
'wanggang': 'WGB',
'wangjiawan': 'WJJ',
'wangjiayingxi': 'KNM',
'wangou': 'WGL',
'wangqing': 'WQL',
'wangtong': 'WTP',
'wangtuanzhuang': 'WZJ',
'wangyang': 'WYB',
'wangzhaotun': 'WZB',
'wanle': 'WEB',
'wannian': 'WWG',
'wanning': 'WNQ',
'wanyuan': 'WYY',
'wanzhou': 'WYW',
'wanzhoubei': 'WZE',
'wawushan': 'WAH',
'wayaotian': 'WIM',
'weidong': 'WVT',
'weifang': 'WFK',
'weihai': 'WKK',
'weihaibei': 'WHK',
'weihe': 'WHB',
'weihui': 'WHF',
'weihulingbei': 'WBL',
'weijin': 'WJL',
'weinan': 'WNY',
'weinanbei': 'WBY',
'weinannan': 'WVY',
'weinanzhen': 'WNJ',
'weiqing': 'WAM',
'weishanzhuang': 'WSP',
'weishe': 'WSM',
'weixing': 'WVB',
'weizhangzi': 'WKD',
'weizhuang': 'WZY',
'weizigou': 'WZL',
'weizizhen': 'WQP',
'wenan': 'WBP',
'wenchang': 'WEQ',
'wenchun': 'WDB',
'wendeng': 'WBK',
'wendengdong': 'WGK',
'wendi': 'WNZ',
'wenling': 'VHH',
'wenshui': 'WEV',
'wenxi': 'WXV',
'wenxixi': 'WOV',
'wenzhou': 'RZH',
'wenzhounan': 'VRH',
'woken': 'WQB',
'wolitun': 'WLX',
'wopi': 'WPT',
'wuan': 'WAP',
'wuchagou': 'WCT',
'wuchang': 'WCB',
'wudalianchi': 'WRB',
'wudangshan': 'WRN',
'wudaogou': 'WDL',
'wudaohe': 'WHP',
'wuerqihan': 'WHX',
'wufushan': 'WFG',
'wugong': 'WGY',
'wuguantian': 'WGM',
'wuhai': 'WVC',
'wuhaixi': 'WXC',
'wuhan': 'WHN',
'wuhu': 'WHH',
'wuji': 'WJP',
'wujia': 'WUB',
'wujiachuan': 'WCJ',
'wujiatun': 'WJT',
'wukeshu': 'WKT',
'wulanhada': 'WLC',
'wulanhaote': 'WWT',
'wulashan': 'WSC',
'wulateqianqi': 'WQC',
'wulian': 'WLK',
'wulong': 'WLW',
'wulongbei': 'WBT',
'wulongbeidong': 'WMT',
'wulongquannan': 'WFN',
'wulumuqi': 'WAR',
'wulumuqinan': 'WMR',
'wunuer': 'WRX',
'wunvshan': 'WET',
'wupu': 'WUY',
'wuqiao': 'WUP',
'wuqing': 'WWP',
'wushan': 'WSJ',
'wusheng': 'WSE',
'wutaishan': 'WSV',
'wuwei': 'IIH',
'wuweinan': 'WWJ',
'wuwu': 'WVR',
'wuxi': 'WXR',
'wuxiang': 'WVV',
'wuxidong': 'WGH',
'wuxixinqu': 'IFH',
'wuxu': 'WYZ',
'wuxue': 'WXN',
'wuyi': 'RYH',
'wuyibei': 'WDH',
'wuyiling': 'WPB',
'wuying': 'WWB',
'wuyishan': 'WAS',
'wuyishanbei': 'WBS',
'wuyishandong': 'WCS',
'wuyuan': 'WYG',
'wuzhai': 'WZV',
'wuzhi': 'WIF',
'wuzhou': 'WZZ',
'wuzhounan': 'WBZ',
'xiabancheng': 'EBP',
'xiachengzi': 'XCB',
'xiaguanying': 'XGJ',
'xiahuayuan': 'XYP',
'xiajiang': 'EJG',
'xiamatang': 'XAT',
'xiamen': 'XMS',
'xiamenbei': 'XKS',
'xiamengaoqi': 'XBS',
'xian': 'XAY',
'xianbei': 'EAY',
'xiangcheng': 'ERN',
'xiangfang': 'XFB',
'xiangfen': 'XFV',
'xiangfenxi': 'XTV',
'xianghe': 'XXB',
'xianglan': 'XNB',
'xiangtan': 'XTQ',
'xiangtanbei': 'EDQ',
'xiangtang': 'XTG',
'xiangxiang': 'XXQ',
'xiangyang': 'XFN',
'xiangyangdong': 'XWN',
'xiangyuan': 'EIF',
'xiangyun': 'EXM',
'xianlin': 'XPH',
'xiannan': 'CAY',
'xianning': 'XNN',
'xianningbei': 'XRN',
'xianningdong': 'XKN',
'xianningnan': 'UNN',
'xianrenqiao': 'XRL',
'xiantaoxi': 'XAN',
'xianyang': 'XYY',
'xianyangqindu': 'XOY',
'xianyou': 'XWS',
'xiaocun': 'XEM',
'xiaodejiang': 'EJM',
'xiaodong': 'XEZ',
'xiaogan': 'XGN',
'xiaoganbei': 'XJN',
'xiaogandong': 'GDN',
'xiaoheyan': 'XYD',
'xiaohezhen': 'EKY',
'xiaojinkou': 'NKQ',
'xiaolan': 'EAQ',
'xiaoling': 'XLB',
'xiaonan': 'XNV',
'xiaoshao': 'XAM',
'xiaoshi': 'XST',
'xiaosigou': 'ESP',
'xiaoxi': 'XOV',
'xiaoxianbei': 'QSH',
'xiaoxinjie': 'XXM',
'xiaoxizhuang': 'XXP',
'xiaoyangqi': 'XYX',
'xiaoyuejiu': 'XFM',
'xiaoyugu': 'XHM',
'xiapu': 'XOS',
'xiashe': 'XSV',
'xiashi': 'XIZ',
'xiataizi': 'EIP',
'xiayixian': 'EJH',
'xibali': 'XLP',
'xichang': 'ECW',
'xichangnan': 'ENW',
'xidamiao': 'XMP',
'xide': 'EDW',
'xiehejian': 'EEP',
'xiejiazhen': 'XMT',
'xifeng': 'XFT',
'xigangzi': 'NBB',
'xigu': 'XIJ',
'xigucheng': 'XUJ',
'xihudong': 'WDQ',
'xijiekou': 'EKM',
'xilin': 'XYB',
'xilinhaote': 'XTC',
'xiliu': 'GCT',
'ximashan': 'XMB',
'xinan': 'EAM',
'xinanxian': 'XAF',
'xinbaoan': 'XAP',
'xinchengzi': 'XCT',
'xinchuoyuan': 'XRX',
'xindudong': 'EWW',
'xinfeng': 'EFG',
'xingan': 'EGG',
'xinganbei': 'XDZ',
'xingcheng': 'XCD',
'xingguo': 'EUG',
'xinghexi': 'XEC',
'xingkai': 'EKB',
'xinglongdian': 'XDD',
'xinglongxian': 'EXP',
'xinglongzhen': 'XZB',
'xingning': 'ENQ',
'xingping': 'XPY',
'xingquanbu': 'XQJ',
'xingshu': 'XSB',
'xingshutun': 'XDT',
'xingtai': 'XTP',
'xingtaidong': 'EDP',
'xingye': 'SNZ',
'xingyi': 'XRZ',
'xinhe': 'XIR',
'xinhua': 'EHQ',
'xinhuanan': 'EJQ',
'xinhuang': 'XLQ',
'xinhuangxi': 'EWQ',
'xinhuatun': 'XAX',
'xinhui': 'EFQ',
'xining': 'XNO',
'xinji': 'ENP',
'xinjiang': 'XJM',
'xinjin': 'IRW',
'xinjinnan': 'ITW',
'xinle': 'ELP',
'xinli': 'XLJ',
'xinlin': 'XPX',
'xinlitun': 'XLD',
'xinlizhen': 'XGT',
'xinmin': 'XMD',
'xinpingtian': 'XPM',
'xinqing': 'XQB',
'xinqiu': 'XQD',
'xinsongpu': 'XOB',
'xinwopu': 'EPD',
'xinxian': 'XSN',
'xinxiang': 'XXF',
'xinxiangdong': 'EGF',
'xinxingxian': 'XGQ',
'xinyang': 'XUN',
'xinyangdong': 'OYN',
'xinyangzhen': 'XZJ',
'xinyi': 'EEQ',
'xinyouyi': 'EYB',
'xinyu': 'XUG',
'xinyubei': 'XBG',
'xinzhangfang': 'XZX',
'xinzhangzi': 'ERP',
'xinzhao': 'XZT',
'xinzhengjichang': 'EZF',
'xinzhou': 'XXV',
'xiongyuecheng': 'XYT',
'xiping': 'XPN',
'xipu': 'XIW',
'xipudong': 'XAW',
'xishui': 'XZN',
'xiushan': 'ETW',
'xiuwu': 'XWF',
'xiuwuxi': 'EXF',
'xiwuqi': 'XWC',
'xixia': 'XIF',
'xixian': 'ENN',
'xixiang': 'XQY',
'xixiaozhao': 'XZC',
'xiyangcun': 'XQF',
'xizhelimu': 'XRD',
'xizi': 'XZD',
'xuancheng': 'ECH',
'xuangang': 'XGV',
'xuanhan': 'XHY',
'xuanhe': 'XWJ',
'xuanhua': 'XHP',
'xuanwei': 'XWM',
'xuanzhong': 'XRP',
'xuchang': 'XCF',
'xuchangdong': 'XVF',
'xujia': 'XJB',
'xujiatai': 'XTJ',
'xujiatun': 'XJT',
'xunyang': 'XUY',
'xunyangbei': 'XBY',
'xupu': 'EPQ',
'xupunan': 'EMQ',
'xusanwan': 'XSJ',
'xushui': 'XSP',
'xuwen': 'XJQ',
'xuzhou': 'XCH',
'xuzhoudong': 'UUH',
'yabuli': 'YBB',
'yabulinan': 'YWB',
'yakeshi': 'YKX',
'yalongwan': 'TWQ',
'yanan': 'YWY',
'yancheng': 'YEK',
'yanchi': 'YKJ',
'yanchuan': 'YYY',
'yandangshan': 'YGH',
'yangang': 'YGW',
'yangcao': 'YAB',
'yangcaodi': 'YKM',
'yangcha': 'YAL',
'yangchang': 'YED',
'yangcheng': 'YNF',
'yangchenghu': 'AIH',
'yangchun': 'YQQ',
'yangcun': 'YBP',
'yanggang': 'YRB',
'yanggao': 'YOV',
'yanggu': 'YIK',
'yanghe': 'GTH',
'yangjiuhe': 'YHM',
'yanglin': 'YLM',
'yangling': 'YSY',
'yanglingnan': 'YEY',
'yangliuqing': 'YQP',
'yangmingbu': 'YVV',
'yangpingguan': 'YAY',
'yangpu': 'ABM',
'yangqu': 'YQV',
'yangquan': 'AQP',
'yangquanbei': 'YPP',
'yangquanqu': 'YYV',
'yangshuling': 'YAD',
'yangshuo': 'YCZ',
'yangweishao': 'YWM',
'yangxin': 'YVK',
'yangyi': 'ARP',
'yangzhangzi': 'YZD',
'yangzhewo': 'AEM',
'yangzhou': 'YLH',
'yanhecheng': 'YHP',
'yanhui': 'AEP',
'yanji': 'YJL',
'yanjiao': 'AJP',
'yanjiazhuang': 'AZK',
'yanjin': 'AEW',
'yanjixi': 'YXL',
'yanliang': 'YNY',
'yanling': 'YAG',
'yanqi': 'YSR',
'yanqing': 'YNP',
'yanshan': 'AOP',
'yanshi': 'YSF',
'yantai': 'YAK',
'yantainan': 'YLK',
'yantongshan': 'YSL',
'yantongtun': 'YUX',
'yanzhou': 'YZK',
'yanzibian': 'YZY',
'yaoan': 'YAC',
'yaojia': 'YAT',
'yaoqianhutun': 'YQT',
'yaoshang': 'ASP',
'yatunpu': 'YTZ',
'yayuan': 'YYL',
'yazhou': 'YUQ',
'yebaishou': 'YBD',
'yecheng': 'YER',
'yesanpo': 'AIP',
'yian': 'YAX',
'yibin': 'YBW',
'yichang': 'YCN',
'yichangdong': 'HAN',
'yicheng': 'YIN',
'yichun': 'YEG',
'yichunxi': 'YCG',
'yiershi': 'YET',
'yijiang': 'RVH',
'yijianpu': 'YJT',
'yilaha': 'YLX',
'yiliang': 'ALW',
'yiliangbei': 'YSM',
'yilin': 'YLB',
'yima': 'YMF',
'yimianpo': 'YPB',
'yimianshan': 'YST',
'yimin': 'YMX',
'yinai': 'YVM',
'yinan': 'YNK',
'yinchuan': 'YIJ',
'yindi': 'YDM',
'yingbinlu': 'YFW',
'yingcheng': 'YHN',
'yingchengzi': 'YCT',
'yingchun': 'YYB',
'yingde': 'YDQ',
'yingdexi': 'IIQ',
'yingjie': 'YAM',
'yingjisha': 'YIR',
'yingkou': 'YKT',
'yingkoudong': 'YGT',
'yingpanshui': 'YZJ',
'yingshan': 'NUW',
'yingshouyingzi': 'YIP',
'yingtan': 'YTG',
'yingtanbei': 'YKG',
'yingxian': 'YZV',
'yining': 'YMR',
'yiningdong': 'YNR',
'yinlang': 'YJX',
'yinping': 'KPQ',
'yintan': 'CTQ',
'yishui': 'YUK',
'yitulihe': 'YEX',
'yiwu': 'YWH',
'yixian': 'YXD',
'yixing': 'YUH',
'yiyang': 'YIG',
'yizheng': 'UZH',
'yizhou': 'YSZ',
'yizi': 'YQM',
'yongan': 'YAS',
'yonganxiang': 'YNB',
'yongchengbei': 'RGH',
'yongchuan': 'YCW',
'yongchuandong': 'WMW',
'yongdeng': 'YDJ',
'yongding': 'YGS',
'yongfengying': 'YYM',
'yongfunan': 'YBZ',
'yongji': 'YIV',
'yongjia': 'URH',
'yongjibei': 'AJV',
'yongkang': 'RFH',
'yongkangnan': 'QUH',
'yonglang': 'YLW',
'yongledian': 'YDY',
'yongshou': 'ASY',
'yongtai': 'YTS',
'yongxiu': 'ACG',
'yongzhou': 'AOQ',
'youhao': 'YOB',
'youxi': 'YXS',
'youxian': 'YOG',
'youxiannan': 'YXG',
'youyang': 'AFW',
'yuanbaoshan': 'YUD',
'yuandun': 'YAJ',
'yuanmou': 'YMM',
'yuanping': 'YPV',
'yuanqian': 'AQK',
'yuanshi': 'YSP',
'yuantan': 'YTQ',
'yuanyangzhen': 'YYJ',
'yucheng': 'YCK',
'yuchengxian': 'IXH',
'yuci': 'YCV',
'yudu': 'YDG',
'yuechi': 'AWW',
'yuejiajing': 'YGJ',
'yueliangtian': 'YUM',
'yueqing': 'UPH',
'yueshan': 'YBF',
'yuexi': 'YHW',
'yueyang': 'YYQ',
'yueyangdong': 'YIQ',
'yuge': 'VTM',
'yuhang': 'EVH',
'yujiang': 'YHG',
'yujiapu': 'YKP',
'yuliangpu': 'YLD',
'yulin': 'YLZ',
'yumen': 'YXJ',
'yunan': 'YKQ',
'yuncailing': 'ACP',
'yuncheng': 'YPK',
'yunchengbei': 'ABV',
'yundonghai': 'NAQ',
'yunfudong': 'IXQ',
'yunjusi': 'AFP',
'yunlianghe': 'YEF',
'yunmeng': 'YMN',
'yunshan': 'KZQ',
'yunxiao': 'YBS',
'yuping': 'YZW',
'yuquan': 'YQB',
'yushan': 'YNG',
'yushannan': 'YGG',
'yushe': 'AUM',
'yushi': 'YSJ',
'yushu': 'YRT',
'yushugou': 'YGP',
'yushutai': 'YUT',
'yushutun': 'YSX',
'yutianxian': 'ATP',
'yuxi': 'AXM',
'yuxixi': 'YXM',
'yuyao': 'YYH',
'yuyaobei': 'CTH',
'zaolin': 'ZIV',
'zaoqiang': 'ZVP',
'zaoyang': 'ZYN',
'zaozhuang': 'ZEK',
'zaozhuangdong': 'ZNK',
'zaozhuangxi': 'ZFK',
'zengjiapingzi': 'ZBW',
'zengkou': 'ZKE',
'zepu': 'ZPR',
'zerunli': 'ZLM',
'zhalainuoerxi': 'ZXX',
'zhalantun': 'ZTX',
'zhalute': 'ZLD',
'zhangbaiwan': 'ZUP',
'zhangdang': 'ZHT',
'zhanggutai': 'ZGD',
'zhangjiajie': 'DIQ',
'zhangjiakou': 'ZKP',
'zhangjiakounan': 'ZMP',
'zhanglan': 'ZLV',
'zhangmutou': 'ZOQ',
'zhangmutoudong': 'ZRQ',
'zhangping': 'ZPS',
'zhangpu': 'ZCS',
'zhangqiao': 'ZQY',
'zhangqiu': 'ZTK',
'zhangshu': 'ZSG',
'zhangshudong': 'ZOG',
'zhangweitun': 'ZWB',
'zhangwu': 'ZWD',
'zhangxin': 'ZIP',
'zhangye': 'ZYJ',
'zhangyexi': 'ZEJ',
'zhangzhou': 'ZUS',
'zhangzhoudong': 'GOS',
'zhanjiang': 'ZJZ',
'zhanjiangxi': 'ZWQ',
'zhaoan': 'ZDS',
'zhaobai': 'ZBP',
'zhaocheng': 'ZCV',
'zhaodong': 'ZDB',
'zhaofupu': 'ZFM',
'zhaoguang': 'ZGB',
'zhaohua': 'ZHW',
'zhaoqing': 'ZVQ',
'zhaoqingdong': 'FCQ',
'zhaotong': 'ZDW',
'zhashui': 'ZSY',
'zhazi': 'ZAL',
'zhelimu': 'ZLC',
'zhenan': 'ZEY',
'zhenchengdi': 'ZDV',
'zhengding': 'ZDP',
'zhengdingjichang': 'ZHP',
'zhengxiangbaiqi': 'ZXC',
'zhengzhou': 'ZZF',
'zhengzhoudong': 'ZAF',
'zhengzhouxi': 'XPF',
'zhenjiang': 'ZJH',
'zhenjiangnan': 'ZEH',
'zhenlai': 'ZLT',
'zhenping': 'ZPF',
'zhenxi': 'ZVT',
'zhenyuan': 'ZUW',
'zhian': 'ZAD',
'zhicheng': 'ZCN',
'zhifangdong': 'ZMN',
'zhijiang': 'ZPQ',
'zhijiangbei': 'ZIN',
'zhijin': 'IZW',
'zhijinbei': 'ZJE',
'zhongchuanjichang': 'ZJJ',
'zhonghe': 'ZHX',
'zhonghuamen': 'VNH',
'zhongjiacun': 'ZJY',
'zhongkai': 'KKQ',
'zhongmu': 'ZGF',
'zhongning': 'VNJ',
'zhongningdong': 'ZDJ',
'zhongningnan': 'ZNJ',
'zhongshan': 'ZSZ',
'zhongshanbei': 'ZGQ',
'zhongshanxi': 'ZAZ',
'zhongwei': 'ZWJ',
'zhongxiang': 'ZTN',
'zhongzhai': 'ZZM',
'zhoujia': 'ZOB',
'zhoujiatun': 'ZOD',
'zhoukou': 'ZKN',
'zhoushuizi': 'ZIT',
'zhuanghebei': 'ZUT',
'zhuangqiao': 'ZQH',
'zhuangzhi': 'ZUX',
'zhucheng': 'ZQK',
'zhuhai': 'ZHQ',
'zhuhaibei': 'ZIQ',
'zhuji': 'ZDH',
'zhujiagou': 'ZUB',
'zhujiawan': 'CWJ',
'zhujiayao': 'ZUJ',
'zhumadian': 'ZDN',
'zhumadianxi': 'ZLN',
'zhuozhou': 'ZXP',
'zhuozhoudong': 'ZAP',
'zhuozidong': 'ZDC',
'zhuozishan': 'ZZC',
'zhurihe': 'ZRC',
'zhuwo': 'ZOP',
'zhuyangxi': 'ZXW',
'zhuyuanba': 'ZAW',
'zhuzhou': 'ZZQ',
'zhuzhouxi': 'ZAQ',
'zibo': 'ZBK',
'zichang': 'ZHY',
'zigong': 'ZGW',
'zijingguan': 'ZYP',
'zixi': 'ZXS',
'ziyang': 'ZYW',
'ziyangbei': 'FYW',
'zizhong': 'ZZW',
'zizhongbei': 'WZW',
'zizhou': 'ZZY',
'zongxi': 'ZOY',
'zoucheng': 'ZIK',
'zunyi': 'ZIW',
'zuoling': 'ZSN'}
| fancyqlx/fancy12306 | fancy12306/stations.py | Python | mit | 58,597 | ["ADF", "ASE", "VTK"] | ea9221cce86739fe8e36422be7ebb14c934c57e83c09c5afee78cb12c73578a8 |
# Author: Carlos Xavier Hernandez <cxh@stanford.edu>
# Contributors:
# Copyright (c) 2016, Stanford University and the Authors
# All rights reserved.
from __future__ import print_function, division, absolute_import
import numpy as np
try:  # collections.abc is the home of the ABCs on Python 3.3+
    from collections.abc import Sequence
except ImportError:  # Python 2 kept them directly in collections
    from collections import Sequence
from ..base import BaseEstimator
from ..utils import check_iter_of_sequences
class MultiSequencePreprocessingMixin(BaseEstimator):
# The API for the scikit-learn preprocessing object is, in fit(), that
# they take a single 2D array of shape (n_data_points, n_features).
#
# For reducing a collection of timeseries, we need to preserve
# the structure of which data_point came from which sequence. If
# we concatenate the sequences together, we lose that information.
#
# This mixin is basically a little "adaptor" that changes fit()
# so that it accepts a list of sequences. Its implementation
# concatenates the sequences, calls the superclass fit(), and
# then splits the labels_ back into the sequenced form.
#
# This code is copied and modified from cluster.MultiSequenceClusterMixin
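    #
    # A minimal sketch of that bookkeeping (illustrative values, not part of
    # the API): for sequences of lengths [3, 2], _concat() stacks them into
    # one (5, n_features) array and _split() recovers the pieces from the
    # cumulative lengths:
    #
    #   lengths = [3, 2]; np.cumsum(lengths) -> [3, 5]
    #   concat[0:3], concat[3:5]             -> the two original sequences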
def fit(self, sequences, y=None):
"""Fit Preprocessing to X.
Parameters
----------
sequences : list of array-like, each of shape [sequence_length, n_features]
A list of multivariate timeseries. Each sequence may have
a different length, but they all must have the same number
of features.
y : None
Ignored
Returns
-------
self
"""
check_iter_of_sequences(sequences)
s = super(MultiSequencePreprocessingMixin, self)
s.fit(self._concat(sequences))
return self
def _concat(self, sequences):
self.__lengths = [len(s) for s in sequences]
# Indexing will fail on generic iterators
        if not isinstance(sequences, Sequence):
sequences = list(sequences)
if len(sequences) > 0 and isinstance(sequences[0], np.ndarray):
concat = np.concatenate(sequences)
else:
# if the input sequences are not numpy arrays, we need to guess
# how to concatenate them. this operation below works for mdtraj
# trajectories (which is the use case that I want to be sure to
            # support), but in general the python container protocol doesn't
            # give us a generic way to merge arbitrary sequence types
concat = sequences[0].join(sequences[1:])
assert sum(self.__lengths) == len(concat)
return concat
def _split(self, concat):
return [concat[cl - l: cl] for (cl, l) in
zip(np.cumsum(self.__lengths), self.__lengths)]
def transform(self, sequences):
"""Apply preprocessing to sequences
Parameters
----------
sequences: list of array-like, each of shape (n_samples_i, n_features)
            Sequence data to transform, where n_samples_i is the number of samples
in sequence i and n_features is the number of features.
Returns
-------
sequence_new : list of array-like, each of shape (n_samples_i, n_components)
"""
check_iter_of_sequences(sequences)
transforms = []
for X in sequences:
transforms.append(self.partial_transform(X))
return transforms
def fit_transform(self, sequences, y=None):
"""Fit the model and apply preprocessing
Parameters
----------
sequences: list of array-like, each of shape (n_samples_i, n_features)
            Training data, where n_samples_i is the number of samples
in sequence i and n_features is the number of features.
y : None
Ignored
Returns
-------
sequence_new : list of array-like, each of shape (n_samples_i, n_components)
"""
self.fit(sequences)
transforms = self.transform(sequences)
return transforms
def partial_transform(self, sequence):
"""Apply preprocessing to single sequence
Parameters
----------
        sequence: array-like, shape (n_samples, n_features)
            A single sequence to transform
        Returns
        -------
        out : array-like, shape (n_samples, n_features)
"""
s = super(MultiSequencePreprocessingMixin, self)
return s.transform(sequence)
def partial_fit(self, sequence, y=None):
"""Fit Preprocessing to X.
Parameters
----------
sequence : array-like, [sequence_length, n_features]
A multivariate timeseries.
y : None
Ignored
Returns
-------
self
"""
s = super(MultiSequencePreprocessingMixin, self)
return s.fit(sequence)
class MultiSequenceOnlinePreprocessingMixin(MultiSequencePreprocessingMixin):
def fit(self, sequences, y=None):
"""Fit Preprocessing to X.
Parameters
----------
sequences : list of array-like, each of shape [sequence_length, n_features]
A list of multivariate timeseries. Each sequence may have
a different length, but they all must have the same number
of features.
y : None
Ignored
Returns
-------
self
"""
check_iter_of_sequences(sequences)
for sequence in sequences:
s = super(MultiSequencePreprocessingMixin, self)
s.partial_fit(sequence)
return self
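

# A hedged usage sketch, assuming a concrete estimator built on the mixin
# (the subclass name below is hypothetical; any scikit-learn-style
# preprocessor combined with MultiSequencePreprocessingMixin would do):
#
#   scaler = SomeMultiSequenceScaler()          # hypothetical subclass
#   scaler.fit([np.random.rand(10, 3), np.random.rand(7, 3)])
#   out = scaler.transform([np.random.rand(5, 3)])
#   assert out[0].shape[0] == 5                 # per-sequence structure kept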
| mpharrigan/mixtape | msmbuilder/preprocessing/base.py | Python | lgpl-2.1 | 5,660 | ["MDTraj"] | 679d896b010de4dc9cd61e720a8f784b36bf678c065b20f168288c156ea7f6a8 |
import sys
import requests
from requests.exceptions import HTTPError
import os
import multiprocessing
import time
import queue
from ddsc.core.localstore import HashUtil
from ddsc.core.ddsapi import DDS_TOTAL_HEADER
from ddsc.core.util import humanize_bytes, transfer_speed_str
SWIFT_EXPIRED_STATUS_CODE = 401
S3_EXPIRED_STATUS_CODE = 403
MISMATCHED_FILE_HASH_WARNING = """
NOTICE: Data Service reports multiple hashes for {} file(s).
The downloaded files have been verified and confirmed to match one of these hashes.
You do not need to retry the download.
For more information, visit https://github.com/Duke-GCB/DukeDSClient/wiki/MD5-Hash-Conflicts.
"""
class MD5FileHash(object):
algorithm = 'md5'
@staticmethod
def get_hash_value(file_path):
hash_util = HashUtil()
hash_util.add_file(file_path)
return hash_util.hash.hexdigest()
class FileHash(object):
algorithm_to_get_hash_value = {
MD5FileHash.algorithm: MD5FileHash.get_hash_value
}
def __init__(self, algorithm, expected_hash_value, file_path):
self.algorithm = algorithm
self.expected_hash_value = expected_hash_value
self.file_path = file_path
def _get_hash_value(self):
get_hash_value_func = self.algorithm_to_get_hash_value.get(self.algorithm)
if get_hash_value_func:
return get_hash_value_func(self.file_path)
raise ValueError("Unsupported algorithm {}.".format(self.algorithm))
def is_valid(self):
return self._get_hash_value() == self.expected_hash_value
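    # Note on the dispatch above: algorithm names map to hash functions via
    # algorithm_to_get_hash_value, so supporting another algorithm (e.g. a
    # hypothetical sha256 entry, not implemented here) would only require
    # registering one more name -> function pair in that dict.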
@staticmethod
def get_supported_file_hashes(dds_hashes, file_path):
"""
Returns a list of FileHashes for each dict in dds_hashes.
:param dds_hashes: [dict]: list of dicts with 'algorithm', and 'value' keys
:param file_path: str: path to file to have hash checked
:return: [FileHash]
"""
file_hashes = []
for hash_info in dds_hashes:
algorithm = hash_info.get('algorithm')
hash_value = hash_info.get('value')
if algorithm in FileHash.algorithm_to_get_hash_value:
file_hashes.append(FileHash(algorithm, hash_value, file_path))
return file_hashes
@staticmethod
def separate_valid_and_failed_hashes(file_hashes):
"""
        Given a list of file hashes, separate them into a list of valid and a list of failed.
:param file_hashes: [FileHash]
:return: [FileHash], [FileHash]: valid_file_hashes, failed_file_hashes
"""
valid_file_hashes = []
failed_file_hashes = []
for file_hash in file_hashes:
if file_hash.is_valid():
valid_file_hashes.append(file_hash)
else:
failed_file_hashes.append(file_hash)
return valid_file_hashes, failed_file_hashes
class FileHashStatus(object):
STATUS_OK = "OK"
STATUS_WARNING = "WARNING"
STATUS_FAILED = "FAILED"
def __init__(self, file_hash, status):
self.file_hash = file_hash
self.status = status
def has_a_valid_hash(self):
return self.status in [self.STATUS_OK, self.STATUS_WARNING]
def get_status_line(self):
return "{} {} {} {}".format(self.file_hash.file_path,
self.file_hash.expected_hash_value,
self.file_hash.algorithm,
self.status)
def raise_for_status(self):
if self.status == self.STATUS_FAILED:
raise ValueError("Hash validation error: {}".format(self.get_status_line()))
@staticmethod
def determine_for_hashes(dds_hashes, file_path):
"""
        Compares dds_hashes against file_path using the associated algorithms, recording a status property.
        The status property will be set as follows:
STATUS_OK: there are only valid file hashes
STATUS_FAILED: there are only failed file hashes
STATUS_WARNING: there are both failed and valid hashes
Raises ValueError if no hashes found.
:param dds_hashes: [dict]: list of dicts with 'algorithm', and 'value' keys
:param file_path: str: path to file to have hash checked
:return: FileHashStatus
"""
file_hashes = FileHash.get_supported_file_hashes(dds_hashes, file_path)
valid_file_hashes, failed_file_hashes = FileHash.separate_valid_and_failed_hashes(file_hashes)
if valid_file_hashes:
first_ok_file_hash = valid_file_hashes[0]
if failed_file_hashes:
return FileHashStatus(first_ok_file_hash, FileHashStatus.STATUS_WARNING)
else:
return FileHashStatus(first_ok_file_hash, FileHashStatus.STATUS_OK)
else:
if failed_file_hashes:
first_failed_file_hash = failed_file_hashes[0]
return FileHashStatus(first_failed_file_hash, FileHashStatus.STATUS_FAILED)
raise ValueError("Unable to validate: No supported hashes found for file {}".format(file_path))
class FileDownloadState(object):
"""
Contains details passed between foreground ProjectFileDownloader and background download_file function
"""
NEW = 'new' # initial state before downloading
DOWNLOADING = 'downloading' # state when downloading
    GOOD = 'good'  # successfully downloaded and verified the file's hash
ALREADY_COMPLETE = 'already_complete' # the file already exists and has a correct md5 sum
EXPIRED_URL = 'expired_url' # backend url expired before we got a chance to download it
ERROR = 'error' # an error occurred during download
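    # Lifecycle sketch, inferred from the mark_* helpers below rather than an
    # authoritative state diagram: NEW -> DOWNLOADING -> GOOD on a verified
    # download; ALREADY_COMPLETE when an existing local copy already verifies;
    # EXPIRED_URL when the backend URL lapses first (presumably so the
    # downloader can retry with a fresh URL); ERROR for everything else.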
def __init__(self, project_file, output_path, config):
self.file_id = project_file.id
self.size = project_file.size
self.hashes = project_file.hashes
self.output_path = output_path
self.url = project_file.file_url['host'] + project_file.file_url['url']
self.retries = config.file_download_retries
self.download_bytes_per_chunk = config.download_bytes_per_chunk
self.state = self.NEW
self.status = None
self.msg = 'New state'
def calculate_file_hash_status(self):
return FileHashStatus.determine_for_hashes(self.hashes, self.output_path)
def is_ok_state(self):
return self.state == self.GOOD or self.state == self.ALREADY_COMPLETE
def mark_good(self, status):
self.state = self.GOOD
self.status = status
self.msg = ''
return self
def mark_already_complete(self, status):
self.state = self.ALREADY_COMPLETE
self.status = status
self.msg = ''
return self
def mark_expired_url(self, msg):
self.state = self.EXPIRED_URL
self.status = None
self.msg = msg
return self
def mark_error(self, msg):
self.state = self.ERROR
self.status = None
self.msg = msg
return self
def raise_for_status(self):
if self.status:
self.status.raise_for_status()
else:
raise ValueError(self.msg)
class URLExpiredException(Exception):
pass
class ProjectFileDownloader(object):
def __init__(self, config, dest_directory, project, path_filter):
self.config = config
self.dest_directory = dest_directory
self.project = project
self.dds_connection = project.dds_connection
self.num_workers = config.download_workers
self.path_filter = path_filter
self.async_download_results = []
self.message_queue = multiprocessing.Manager().Queue()
self.files_downloaded = 0
self.files_to_download = None
self.file_download_statuses = {}
self.download_status_list = []
self.spinner_chars = "|/-\\"
self.start_time = None
def run(self):
self.start_time = time.time()
self._download_files()
self._show_downloaded_files_status()
def _download_files(self):
pool = multiprocessing.Pool(self.num_workers)
try:
for project_file in self._get_project_files():
self._download_file(pool, project_file)
while self._work_queue_is_full():
self._wait_for_and_retry_failed_downloads(pool)
while self._work_queue_is_not_empty():
self._wait_for_and_retry_failed_downloads(pool)
finally:
pool.close()
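    # The loop above behaves as a bounded work queue (a reading of the code,
    # not a documented contract): submit tasks until num_workers downloads
    # are in flight, block collecting (and retrying) results before
    # submitting more, then drain whatever is still outstanding.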
def _show_downloaded_files_status(self):
print("\nVerifying contents of {} downloaded files using file hashes.".format(self.files_to_download))
all_good = True
files_with_mismatched_hashes = 0
for download_status in self.download_status_list:
if not download_status.has_a_valid_hash():
all_good = False
if download_status.status == FileHashStatus.STATUS_WARNING:
files_with_mismatched_hashes += 1
print(download_status.get_status_line())
if all_good:
print("All downloaded files have been verified successfully.")
if files_with_mismatched_hashes:
print(MISMATCHED_FILE_HASH_WARNING.format(files_with_mismatched_hashes))
else:
raise ValueError("ERROR: Downloaded file(s) do not match the expected hashes.")
def _get_project_files(self):
project_files_generator = self.project.get_project_files_generator(self.config.page_size)
if self.path_filter:
# fetch all files so we can determine an accurate filtered count
project_files = self._filter_project_files(project_files_generator)
self._print_path_filter_warnings()
self.files_to_download = len(project_files)
self.show_progress_bar()
for project_file in project_files:
yield project_file
else:
for project_file, headers in project_files_generator:
if self.files_to_download is None:
self.files_to_download = int(headers.get(DDS_TOTAL_HEADER))
self.show_progress_bar()
yield project_file
def _filter_project_files(self, project_files_generator):
project_files = []
for project_file, headers in project_files_generator:
if self.path_filter.include_path(project_file.path):
project_files.append(project_file)
return project_files
def _print_path_filter_warnings(self):
if self.path_filter:
unused_paths = self.path_filter.get_unused_paths()
if unused_paths:
print('WARNING: Path(s) not found: {}.'.format(','.join(unused_paths)))
def _download_file(self, pool, project_file):
output_path = project_file.get_local_path(self.dest_directory)
output_path_parent = os.path.dirname(output_path)
if not os.path.exists(output_path_parent):
os.makedirs(output_path_parent)
file_download_state = FileDownloadState(project_file, output_path, self.config)
self._async_download_file(pool, file_download_state)
def _async_download_file(self, pool, file_download_state):
async_result = pool.apply_async(download_file, (file_download_state, self.message_queue))
self.async_download_results.append(async_result)
def _work_queue_is_full(self):
return len(self.async_download_results) >= self.num_workers
def _work_queue_is_not_empty(self):
return len(self.async_download_results) > 0
def _wait_for_and_retry_failed_downloads(self, pool):
download_results = self._pop_ready_download_results()
if download_results:
self._process_download_results(pool, download_results)
else:
self._try_process_message_queue()
time.sleep(0) # Pause to give up CPU since no results are ready
def _try_process_message_queue(self):
try:
file_id, bytes_downloaded, file_size, file_state = self.message_queue.get_nowait()
# This might be out of date for a little bit
self.file_download_statuses[file_id] = (bytes_downloaded, file_size, file_state)
self.show_progress_bar()
except queue.Empty:
pass
def show_progress_bar(self):
files_downloaded, total_bytes_downloaded = self.get_download_progress()
current_time = time.time()
bytes_progress = '{} {}'.format(
humanize_bytes(total_bytes_downloaded),
self.make_download_speed(current_time, total_bytes_downloaded))
sys.stdout.write("\r{} downloaded {} ({} of {} files complete)".format(
self.make_spinner_char(current_time),
bytes_progress.ljust(22),
files_downloaded,
self.files_to_download
))
sys.stdout.flush()
    def make_spinner_char(self, current_time):
        # advance the spinner one character per second of wall-clock time
        seconds = int(current_time)
        return self.spinner_chars[seconds % len(self.spinner_chars)]
def make_download_speed(self, current_time, total_bytes_downloaded):
return transfer_speed_str(
current_time=current_time,
start_time=self.start_time,
transferred_bytes=total_bytes_downloaded
)
def get_download_progress(self):
files_downloaded = 0
total_bytes_downloaded = 0
for file_id, download_info in self.file_download_statuses.items():
bytes_downloaded, file_size, file_state = download_info
# do not include files that were already downloaded in bytes downloaded
if file_state != FileDownloadState.ALREADY_COMPLETE:
total_bytes_downloaded += bytes_downloaded
if bytes_downloaded == file_size:
files_downloaded += 1
return files_downloaded, total_bytes_downloaded
def _pop_ready_download_results(self):
ready_results = []
for async_result in self._get_ready_async_results():
result = async_result.get()
# retrieve the value from the async result
ready_results.append(result)
# remove the async result from the list to watch
self.async_download_results.remove(async_result)
return ready_results
def _get_ready_async_results(self):
ready_results = []
for async_result in self.async_download_results:
if async_result.ready():
ready_results.append(async_result)
return ready_results
def _process_download_results(self, pool, download_results):
for file_download_state in download_results:
if file_download_state.is_ok_state():
file_id = file_download_state.file_id
size = file_download_state.size
status = file_download_state.status
self.file_download_statuses[file_id] = (size, size, file_download_state.state)
self.download_status_list.append(status)
elif file_download_state.retries:
file_download_state.retries -= 1
# Refresh url in file_download_state
file_download = self.dds_connection.get_file_download(file_download_state.file_id)
file_download_state.url = file_download.host + file_download.url
# Re-run download process
self._async_download_file(pool, file_download_state)
else:
raise ValueError("Error downloading {}\n{}".format(
file_download_state.output_path,
file_download_state.msg
))
self.show_progress_bar()
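# --- Added editorial sketch (not part of the original module) ---
# ProjectFileDownloader.run() drives the whole transfer: it fans downloads out
# to a multiprocessing pool, keeps at most num_workers tasks in flight,
# refreshes expired URLs and retries up to config.file_download_retries times,
# then verifies every file hash. Assuming a config/project pair obtained from
# the surrounding DukeDSClient code, usage would look roughly like:
#
#     downloader = ProjectFileDownloader(config, '/tmp/dest', project,
#                                        path_filter=None)
#     downloader.run()  # raises ValueError if any file fails verification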
def download_file(file_download_state, message_queue=None):
if os.path.exists(file_download_state.output_path):
file_hash_status = file_download_state.calculate_file_hash_status()
if file_hash_status.has_a_valid_hash():
return file_download_state.mark_already_complete(file_hash_status)
try:
file_download_state.state = FileDownloadState.DOWNLOADING
written_size = download_url_to_path(file_download_state, message_queue)
return compute_download_result(file_download_state, written_size)
except URLExpiredException:
msg = 'Expired URL: {}'.format(file_download_state.url)
return file_download_state.mark_expired_url(msg)
except Exception as error:
return file_download_state.mark_error(msg=str(error))
def download_url_to_path(file_download_state, message_queue=None):
try:
response = requests.get(file_download_state.url, stream=True)
written_size = 0
response.raise_for_status()
with open(file_download_state.output_path, "wb") as outfile:
for chunk in response.iter_content(chunk_size=file_download_state.download_bytes_per_chunk):
if chunk: # filter out keep-alive new chunks
outfile.write(chunk)
written_size += len(chunk)
if message_queue:
message_queue.put((file_download_state.file_id, written_size, file_download_state.size,
file_download_state.state))
return written_size
except HTTPError:
if response.status_code == SWIFT_EXPIRED_STATUS_CODE or response.status_code == S3_EXPIRED_STATUS_CODE:
raise URLExpiredException()
raise
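# --- Added editorial sketch (not part of the original module) ---
# Worker-to-parent protocol: download_url_to_path reports progress by putting
# (file_id, bytes_downloaded, file_size, state) tuples on the shared queue,
# which _try_process_message_queue drains to redraw the progress bar. A
# minimal illustration of the same tuple shape:
def _example_progress_message(message_queue, file_download_state):
    message_queue.put((file_download_state.file_id,
                       3 * 1024 * 1024,           # bytes written so far
                       file_download_state.size,  # total expected bytes
                       file_download_state.state))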
def compute_download_result(file_download_state, written_size):
if written_size == file_download_state.size:
file_hash_status = file_download_state.calculate_file_hash_status()
if file_hash_status.has_a_valid_hash():
return file_download_state.mark_good(file_hash_status)
else:
return file_download_state.mark_error(msg=file_hash_status.get_status_line())
else:
msg = "Downloaded file was wrong size. Expected: {} Actual: {}".format(file_download_state.size, written_size)
return file_download_state.mark_error(msg=msg)
|
Duke-GCB/DukeDSClient
|
ddsc/core/download.py
|
Python
|
mit
| 18,066
|
[
"VisIt"
] |
deab0ad832781932e9eb9285efbd6522ddff616d24effcfd9964ffe1e03c420d
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1447321436.256078
__CHEETAH_genTimestamp__ = 'Thu Nov 12 18:43:56 2015'
__CHEETAH_src__ = '/home/knuth/openpli-oe-core/build/tmp/work/fusionhd-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+5837c87afc-r0/git/plugin/controllers/views/web/timeraddbyeventid.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Nov 12 18:43:41 2015'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class timeraddbyeventid(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(timeraddbyeventid, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_98586193 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2simplexmlresult>
\t<e2state>''')
_v = VFFSL(SL,"result",True) # u'$result' on line 4, col 11
if _v is not None: write(_filter(_v, rawExpr=u'$result')) # from line 4, col 11.
write(u'''</e2state>
\t<e2statetext>''')
_v = VFFSL(SL,"message",True) # u'$message' on line 5, col 15
if _v is not None: write(_filter(_v, rawExpr=u'$message')) # from line 5, col 15.
write(u'''</e2statetext>\t
</e2simplexmlresult>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_98586193
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_timeraddbyeventid= 'respond'
## END CLASS DEFINITION
if not hasattr(timeraddbyeventid, '_initCheetahAttributes'):
templateAPIClass = getattr(timeraddbyeventid, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(timeraddbyeventid)
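# --- Added editorial sketch (not part of the generated file) ---
# A compiled Cheetah template is rendered by instantiating it with a
# searchList supplying the placeholder values ($result, $message) and
# converting the instance to a string, which invokes respond():
#
#     t = timeraddbyeventid(searchList=[{'result': True, 'message': 'saved'}])
#     xml_text = str(t)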
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=timeraddbyeventid()).run()
|
pli3/e2-openwbif
|
plugin/controllers/views/web/timeraddbyeventid.py
|
Python
|
gpl-2.0
| 5,232
|
[
"VisIt"
] |
570afb0f78a06bf1836a1683013d76b446dbfd5f25258fb51820553446dd706e
|
#!/usr/bin/env python
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation
Required: Python 2.1 or later
Recommended: Python 2.3 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "4.1"# + "$Revision: 1.92 $"[11:15] + "-cvs"
__license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>"]
_debug = 0
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# ---------- required modules (should come with any Python distribution) ----------
import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2
try:
from cStringIO import StringIO as _StringIO
except:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except:
gzip = None
try:
import zlib
except:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
from xml.sax.saxutils import escape as _xmlescape
_XML_AVAILABLE = 1
except:
_XML_AVAILABLE = 0
def _xmlescape(data):
        data = data.replace('&', '&amp;')
        data = data.replace('>', '&gt;')
        data = data.replace('<', '&lt;')
return data
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except:
base64 = binascii = None
# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
import cjkcodecs.aliases
except:
pass
try:
import iconv_codec
except:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
if _debug:
import chardet.constants
chardet.constants._debug = 1
except:
chardet = None
# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(x?[0-9A-Fa-f]+)[^0-9A-Fa-f]')
SUPPORTED_VERSIONS = {'': 'unknown',
'rss090': 'RSS 0.90',
'rss091n': 'RSS 0.91 (Netscape)',
'rss091u': 'RSS 0.91 (Userland)',
'rss092': 'RSS 0.92',
'rss093': 'RSS 0.93',
'rss094': 'RSS 0.94',
'rss20': 'RSS 2.0',
'rss10': 'RSS 1.0',
'rss': 'RSS (unknown version)',
'atom01': 'Atom 0.1',
'atom02': 'Atom 0.2',
'atom03': 'Atom 0.3',
'atom10': 'Atom 1.0',
'atom': 'Atom (unknown version)',
'cdf': 'CDF',
'hotrss': 'Hot RSS'
}
try:
UserDict = dict
except NameError:
# Python 2.1 does not have dict
from UserDict import UserDict
def dict(aList):
rc = {}
for k, v in aList:
rc[k] = v
return rc
class FeedParserDict(UserDict):
keymap = {'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['subtitle', 'summary'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail'}
def __getitem__(self, key):
if key == 'category':
return UserDict.__getitem__(self, 'tags')[0]['term']
if key == 'categories':
return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
realkey = self.keymap.get(key, key)
if type(realkey) == types.ListType:
for k in realkey:
if UserDict.has_key(self, k):
return UserDict.__getitem__(self, k)
if UserDict.has_key(self, key):
return UserDict.__getitem__(self, key)
return UserDict.__getitem__(self, realkey)
def __setitem__(self, key, value):
for k in self.keymap.keys():
if key == k:
key = self.keymap[k]
if type(key) == types.ListType:
key = key[0]
return UserDict.__setitem__(self, key, value)
def get(self, key, default=None):
if self.has_key(key):
return self[key]
else:
return default
def setdefault(self, key, value):
if not self.has_key(key):
self[key] = value
return self[key]
def has_key(self, key):
try:
return hasattr(self, key) or UserDict.has_key(self, key)
except AttributeError:
return False
def __getattr__(self, key):
try:
return self.__dict__[key]
except KeyError:
pass
try:
assert not key.startswith('_')
return self.__getitem__(key)
except:
raise AttributeError, "Object has no attribute '%s'" % key
def __setattr__(self, key, value):
if key.startswith('_') or key == 'data':
self.__dict__[key] = value
else:
return self.__setitem__(key, value)
def __contains__(self, key):
return self.has_key(key)
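# --- Added editorial sketch (not part of feedparser) ---
# FeedParserDict transparently aliases legacy key names to their modern
# equivalents through keymap, so old-style and new-style access agree:
def _example_feedparserdict_aliasing():
    d = FeedParserDict()
    d['feed'] = FeedParserDict({'title': 'Example'})
    # 'channel' is mapped to 'feed'; attribute access works too
    return d['channel'] is d['feed'] and d.feed.title == 'Example'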
def zopeCompatibilityHack():
global FeedParserDict
del FeedParserDict
def FeedParserDict(aDict=None):
rc = {}
if aDict:
rc.update(aDict)
return rc
_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
global _ebcdic_to_ascii_map
if not _ebcdic_to_ascii_map:
emap = (
0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
)
import string
_ebcdic_to_ascii_map = string.maketrans( \
''.join(map(chr, range(256))), ''.join(map(chr, emap)))
return s.translate(_ebcdic_to_ascii_map)
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
return urlparse.urljoin(base, uri)
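# --- Added editorial sketch (not part of feedparser) ---
# _urifixer collapses the stray slashes some feeds emit after the URI scheme
# before the result is handed to urlparse.urljoin:
def _example_urljoin():
    # 'http:////example.com/entry' is fixed up to 'http://example.com/entry'
    return _urljoin('http://example.com/feed/', 'http:////example.com/entry')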
class _FeedParserMixin:
namespaces = {'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/XML/1998/namespace': 'xml',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf'
}
_matchnamespaces = {}
can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'license', 'icon', 'logo']
can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
html_types = ['text/html', 'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
if _debug: sys.stderr.write('initializing FeedParser\n')
if not self._matchnamespaces:
for k, v in self.namespaces.items():
self._matchnamespaces[k.lower()] = v
self.feeddata = FeedParserDict() # feed-level data
self.encoding = encoding # character encoding
self.entries = [] # list of entry-level data
self.version = '' # feed type/version, see SUPPORTED_VERSIONS
self.namespacesInUse = {} # dictionary of namespaces defined by the feed
# the following are used internally to track state;
# this is really out of control and should be refactored
self.infeed = 0
self.inentry = 0
self.incontent = 0
self.intextinput = 0
self.inimage = 0
self.inauthor = 0
self.incontributor = 0
self.inpublisher = 0
self.insource = 0
self.sourcedata = FeedParserDict()
self.contentparams = FeedParserDict()
self._summaryKey = None
self.namespacemap = {}
self.elementstack = []
self.basestack = []
self.langstack = []
self.baseuri = baseuri or ''
self.lang = baselang or None
if baselang:
self.feeddata['language'] = baselang
def unknown_starttag(self, tag, attrs):
if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
# normalize attrs
attrs = [(k.lower(), v) for k, v in attrs]
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
# Note: probably shouldn't simply recreate localname here, but
# our namespace handling isn't actually 100% correct in cases where
# the feed redefines the default namespace (which is actually
# the usual case for inline content, thanks Sam), so here we
# cheat and just reconstruct the element based on localname
# because that compensates for the bugs in our namespace handling.
# This will horribly munge inline content with non-empty qnames,
# but nobody actually does that, so I'm not fixing it.
tag = tag.split(':')[-1]
return self.handle_data('<%s%s>' % (tag, ''.join([' %s="%s"' % t for t in attrs])), escape=0)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# special hack for better tracking of empty textinput/image elements in illformed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
return self.push(prefix + suffix, 1)
def unknown_endtag(self, tag):
if _debug: sys.stderr.write('end %s\n' % tag)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# call special handler (if defined) or default handler
methodname = '_end_' + prefix + suffix
try:
method = getattr(self, methodname)
method()
except AttributeError:
self.pop(prefix + suffix)
# track inline content
if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
tag = tag.split(':')[-1]
self.handle_data('</%s>' % tag, escape=0)
# track xml:base and xml:lang going out of scope
if self.basestack:
self.basestack.pop()
if self.basestack and self.basestack[-1]:
self.baseuri = self.basestack[-1]
if self.langstack:
self.langstack.pop()
if self.langstack: # and (self.langstack[-1] is not None):
self.lang = self.langstack[-1]
def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
if not self.elementstack: return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
if not self.elementstack: return
if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
else:
# entity resolution graciously donated by Aaron Swartz
def name2cp(k):
import htmlentitydefs
if hasattr(htmlentitydefs, 'name2codepoint'): # requires Python 2.3
return htmlentitydefs.name2codepoint[k]
k = htmlentitydefs.entitydefs[k]
if k.startswith('&#') and k.endswith(';'):
return int(k[2:-1]) # not in latin-1
return ord(k)
try: name2cp(ref)
except KeyError: text = '&%s;' % ref
else: text = unichr(name2cp(ref)).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack: return
if escape and self.contentparams.get('type') == 'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
def handle_comment(self, text):
# called for each comment, e.g. <!-- insert message here -->
pass
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
pass
def handle_decl(self, text):
pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if _debug: sys.stderr.write('entering parse_declaration\n')
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1: k = len(self.rawdata)
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
return k+1
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text':
contentType = 'text/plain'
elif contentType == 'html':
contentType = 'text/html'
elif contentType == 'xhtml':
contentType = 'application/xhtml+xml'
return contentType
def trackNamespace(self, prefix, uri):
loweruri = uri.lower()
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
self.version = 'rss090'
if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
self.version = 'rss10'
if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
self.version = 'atom10'
if loweruri.find('backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
uri = 'http://backend.userland.com/rss'
loweruri = uri
if self._matchnamespaces.has_key(loweruri):
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or '', uri)
def decodeEntities(self, element, data):
return data
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
if not self.elementstack: return
if self.elementstack[-1][0] != element: return
element, expectingText, pieces = self.elementstack.pop()
output = ''.join(pieces)
if stripWhitespace:
output = output.strip()
if not expectingText: return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
try:
output = base64.decodestring(output)
except binascii.Error:
pass
except binascii.Incomplete:
pass
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
output = self.resolveURI(output)
# decode entities within embedded markup
if not self.contentparams.get('base64', 0):
output = self.decodeEntities(element, output)
# remove temporary cruft from contentparams
try:
del self.contentparams['mode']
except KeyError:
pass
try:
del self.contentparams['base64']
except KeyError:
pass
# resolve relative URIs within embedded markup
if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
if element in self.can_contain_relative_uris:
output = _resolveRelativeURIs(output, self.baseuri, self.encoding)
# sanitize embedded markup
if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
if element in self.can_contain_dangerous_markup:
output = _sanitizeHTML(output, self.encoding)
if self.encoding and type(output) != type(u''):
try:
output = unicode(output, self.encoding)
except:
pass
# categories/tags/keywords/whatever are handled in _end_category
if element == 'category':
return output
# store output in appropriate place(s)
if self.inentry and not self.insource:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element].append(contentparams)
elif element == 'link':
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element + '_detail'] = contentparams
elif (self.infeed or self.insource) and (not self.intextinput) and (not self.inimage):
context = self._getContext()
if element == 'description':
element = 'subtitle'
context[element] = output
if element == 'link':
context['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
context[element + '_detail'] = contentparams
return output
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith('text/'):
return 0
if self.contentparams['type'].endswith('+xml'):
return 0
if self.contentparams['type'].endswith('/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value):
context = self._getContext()
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': 'rss091u',
'0.92': 'rss092',
'0.93': 'rss093',
'0.94': 'rss094'}
if not self.version:
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = 'rss20'
else:
self.version = 'rss'
def _start_dlhottitles(self, attrsD):
self.version = 'hotrss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
_start_feedinfo = _start_channel
def _cdf_common(self, attrsD):
if attrsD.has_key('lastmod'):
self._start_modified({})
self.elementstack[-1][-1] = attrsD['lastmod']
self._end_modified()
if attrsD.has_key('href'):
self._start_link({})
self.elementstack[-1][-1] = attrsD['href']
self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': 'atom01',
'0.2': 'atom02',
'0.3': 'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = 'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrsD):
self.inimage = 1
self.push('image', 0)
context = self._getContext()
context.setdefault('image', FeedParserDict())
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrsD):
self.intextinput = 1
self.push('textinput', 0)
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
def _start_author(self, attrsD):
self.inauthor = 1
self.push('author', 1)
_start_managingeditor = _start_author
_start_dc_author = _start_author
_start_dc_creator = _start_author
_start_itunes_author = _start_author
def _end_author(self):
self.pop('author')
self.inauthor = 0
self._sync_author_detail()
_end_managingeditor = _end_author
_end_dc_author = _end_author
_end_dc_creator = _end_author
_end_itunes_author = _end_author
def _start_itunes_owner(self, attrsD):
self.inpublisher = 1
self.push('publisher', 0)
def _end_itunes_owner(self):
self.pop('publisher')
self.inpublisher = 0
self._sync_author_detail('publisher')
def _start_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
def _end_contributor(self):
self.pop('contributor')
self.incontributor = 0
def _start_dc_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('name', 0)
def _end_dc_contributor(self):
self._end_name()
self.incontributor = 0
def _start_name(self, attrsD):
self.push('name', 0)
_start_itunes_name = _start_name
def _end_name(self):
value = self.pop('name')
if self.inpublisher:
self._save_author('name', value, 'publisher')
elif self.inauthor:
self._save_author('name', value)
elif self.incontributor:
self._save_contributor('name', value)
elif self.intextinput:
context = self._getContext()
context['textinput']['name'] = value
_end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['image']['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['image']['height'] = value
def _start_url(self, attrsD):
self.push('href', 1)
_start_homepage = _start_url
_start_uri = _start_url
def _end_url(self):
value = self.pop('href')
if self.inauthor:
self._save_author('href', value)
elif self.incontributor:
self._save_contributor('href', value)
elif self.inimage:
context = self._getContext()
context['image']['href'] = value
elif self.intextinput:
context = self._getContext()
context['textinput']['link'] = value
_end_homepage = _end_url
_end_uri = _end_url
def _start_email(self, attrsD):
self.push('email', 0)
_start_itunes_email = _start_email
def _end_email(self):
value = self.pop('email')
if self.inpublisher:
self._save_author('email', value, 'publisher')
elif self.inauthor:
self._save_author('email', value)
elif self.incontributor:
self._save_contributor('email', value)
_end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
def _save_author(self, key, value, prefix='author'):
context = self._getContext()
context.setdefault(prefix + '_detail', FeedParserDict())
context[prefix + '_detail'][key] = value
self._sync_author_detail()
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = '%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author = context.get(key)
if not author: return
emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))''', author)
if not emailmatch: return
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, '')
author = author.replace('()', '')
author = author.strip()
if author and (author[0] == '('):
author = author[1:]
if author and (author[-1] == ')'):
author = author[:-1]
author = author.strip()
context.setdefault('%s_detail' % key, FeedParserDict())
context['%s_detail' % key]['name'] = author
context['%s_detail' % key]['email'] = email
def _start_subtitle(self, attrsD):
self.pushContent('subtitle', attrsD, 'text/plain', 1)
_start_tagline = _start_subtitle
_start_itunes_subtitle = _start_subtitle
def _end_subtitle(self):
self.popContent('subtitle')
_end_tagline = _end_subtitle
_end_itunes_subtitle = _end_subtitle
def _start_rights(self, attrsD):
self.pushContent('rights', attrsD, 'text/plain', 1)
_start_dc_rights = _start_rights
_start_copyright = _start_rights
def _end_rights(self):
self.popContent('rights')
_end_dc_rights = _end_rights
_end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
_start_product = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
def _start_dc_language(self, attrsD):
self.push('language', 1)
_start_language = _start_dc_language
def _end_dc_language(self):
self.lang = self.pop('language')
_end_language = _end_dc_language
def _start_dc_publisher(self, attrsD):
self.push('publisher', 1)
_start_webmaster = _start_dc_publisher
def _end_dc_publisher(self):
self.pop('publisher')
self._sync_author_detail('publisher')
_end_webmaster = _end_dc_publisher
def _start_published(self, attrsD):
self.push('published', 1)
_start_dcterms_issued = _start_published
_start_issued = _start_published
def _end_published(self):
value = self.pop('published')
self._save('published_parsed', _parse_date(value))
_end_dcterms_issued = _end_published
_end_issued = _end_published
def _start_updated(self, attrsD):
self.push('updated', 1)
_start_modified = _start_updated
_start_dcterms_modified = _start_updated
_start_pubdate = _start_updated
_start_dc_date = _start_updated
def _end_updated(self):
value = self.pop('updated')
parsed_value = _parse_date(value)
self._save('updated_parsed', parsed_value)
_end_modified = _end_updated
_end_dcterms_modified = _end_updated
_end_pubdate = _end_updated
_end_dc_date = _end_updated
def _start_created(self, attrsD):
self.push('created', 1)
_start_dcterms_created = _start_created
def _end_created(self):
value = self.pop('created')
self._save('created_parsed', _parse_date(value))
_end_dcterms_created = _end_created
def _start_expirationdate(self, attrsD):
self.push('expired', 1)
def _end_expirationdate(self):
self._save('expired_parsed', _parse_date(self.pop('expired')))
def _start_cc_license(self, attrsD):
self.push('license', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('license')
def _start_creativecommons_license(self, attrsD):
self.push('license', 1)
def _end_creativecommons_license(self):
self.pop('license')
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label): return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(FeedParserDict({'term': term, 'scheme': scheme, 'label': label}))
def _start_category(self, attrsD):
if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
term = attrsD.get('term')
scheme = attrsD.get('scheme', attrsD.get('domain'))
label = attrsD.get('label')
self._addTag(term, scheme, label)
self.push('category', 1)
_start_dc_subject = _start_category
_start_keywords = _start_category
def _end_itunes_keywords(self):
for term in self.pop('itunes_keywords').split():
self._addTag(term, 'http://www.itunes.com/', None)
def _start_itunes_category(self, attrsD):
self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
self.push('category', 1)
def _end_category(self):
value = self.pop('category')
if not value: return
context = self._getContext()
tags = context['tags']
if value and len(tags) and not tags[-1]['term']:
tags[-1]['term'] = value
else:
self._addTag(value, None, None)
_end_dc_subject = _end_category
_end_keywords = _end_category
_end_itunes_category = _end_category
def _start_cloud(self, attrsD):
self._getContext()['cloud'] = FeedParserDict(attrsD)
def _start_link(self, attrsD):
attrsD.setdefault('rel', 'alternate')
attrsD.setdefault('type', 'text/html')
attrsD = self._itsAnHrefDamnIt(attrsD)
if attrsD.has_key('href'):
attrsD['href'] = self.resolveURI(attrsD['href'])
expectingText = self.infeed or self.inentry or self.insource
context = self._getContext()
context.setdefault('links', [])
context['links'].append(FeedParserDict(attrsD))
if attrsD['rel'] == 'enclosure':
self._start_enclosure(attrsD)
if attrsD.has_key('href'):
expectingText = 0
if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
context['link'] = attrsD['href']
else:
self.push('link', expectingText)
_start_producturl = _start_link
def _end_link(self):
value = self.pop('link')
context = self._getContext()
if self.intextinput:
context['textinput']['link'] = value
if self.inimage:
context['image']['link'] = value
_end_producturl = _end_link
def _start_guid(self, attrsD):
self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
self.push('id', 1)
def _end_guid(self):
value = self.pop('id')
self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
if self.guidislink:
# guid acts as link, but only if 'ispermalink' is not present or is 'true',
# and only if the item doesn't already have a link element
self._save('link', value)
def _start_title(self, attrsD):
self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
_start_dc_title = _start_title
_start_media_title = _start_title
def _end_title(self):
value = self.popContent('title')
context = self._getContext()
if self.intextinput:
context['textinput']['title'] = value
elif self.inimage:
context['image']['title'] = value
_end_dc_title = _end_title
_end_media_title = _end_title
def _start_description(self, attrsD):
context = self._getContext()
if context.has_key('summary'):
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
def _start_abstract(self, attrsD):
self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
def _end_description(self):
if self._summaryKey == 'content':
self._end_content()
else:
value = self.popContent('description')
context = self._getContext()
if self.intextinput:
context['textinput']['description'] = value
elif self.inimage:
context['image']['description'] = value
self._summaryKey = None
_end_abstract = _end_description
def _start_info(self, attrsD):
self.pushContent('info', attrsD, 'text/plain', 1)
_start_feedburner_browserfriendly = _start_info
def _end_info(self):
self.popContent('info')
_end_feedburner_browserfriendly = _end_info
def _start_generator(self, attrsD):
if attrsD:
attrsD = self._itsAnHrefDamnIt(attrsD)
if attrsD.has_key('href'):
attrsD['href'] = self.resolveURI(attrsD['href'])
self._getContext()['generator_detail'] = FeedParserDict(attrsD)
self.push('generator', 1)
def _end_generator(self):
value = self.pop('generator')
context = self._getContext()
if context.has_key('generator_detail'):
context['generator_detail']['name'] = value
def _start_admin_generatoragent(self, attrsD):
self.push('generator', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('generator')
self._getContext()['generator_detail'] = FeedParserDict({'href': value})
def _start_admin_errorreportsto(self, attrsD):
self.push('errorreportsto', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('errorreportsto')
def _start_summary(self, attrsD):
context = self._getContext()
if context.has_key('summary'):
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self._summaryKey = 'summary'
self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
_start_itunes_summary = _start_summary
def _end_summary(self):
if self._summaryKey == 'content':
self._end_content()
else:
self.popContent(self._summaryKey or 'summary')
self._summaryKey = None
_end_itunes_summary = _end_summary
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
self._getContext().setdefault('enclosures', []).append(FeedParserDict(attrsD))
href = attrsD.get('href')
if href:
context = self._getContext()
if not context.get('id'):
context['id'] = href
def _start_source(self, attrsD):
self.insource = 1
def _end_source(self):
self.insource = 0
self._getContext()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
def _start_content(self, attrsD):
self.pushContent('content', attrsD, 'text/plain', 1)
src = attrsD.get('src')
if src:
self.contentparams['src'] = src
self.push('content', 1)
def _start_prodlink(self, attrsD):
self.pushContent('content', attrsD, 'text/html', 1)
def _start_body(self, attrsD):
self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrsD):
self.pushContent('content', attrsD, 'text/html', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
value = self.popContent('content')
if copyToDescription:
self._save('description', value)
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
_end_prodlink = _end_content
def _start_itunes_image(self, attrsD):
self.push('itunes_image', 0)
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
_start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
def _end_itunes_explicit(self):
value = self.pop('itunes_explicit', 0)
self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0
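# --- Added editorial sketch (not part of feedparser) ---
# The mixin accumulates character data on elementstack: push() opens a frame,
# handle_data() appends text to the innermost frame, and pop() joins, strips
# and post-processes it. 'language' avoids the relative-URI and HTML
# sanitizing paths, keeping the trace minimal:
def _example_elementstack():
    p = _FeedParserMixin()
    p.push('language', 1)       # expectingText=1
    p.handle_data('  en-us  ')
    return p.pop('language')    # -> u'en-us'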
if _XML_AVAILABLE:
class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
def __init__(self, baseuri, baselang, encoding):
if _debug: sys.stderr.write('trying StrictFeedParser\n')
xml.sax.handler.ContentHandler.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
self.bozo = 0
self.exc = None
def startPrefixMapping(self, prefix, uri):
self.trackNamespace(prefix, uri)
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if lowernamespace.find('backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
namespace = 'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
if prefix:
localname = prefix + ':' + localname
localname = str(localname).lower()
if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
attrsD = {}
for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
attrlocalname = prefix + ':' + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
self.unknown_starttag(localname, attrsD.items())
def characters(self, text):
self.handle_data(text)
def endElementNS(self, name, qname):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = ''
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if prefix:
localname = prefix + ':' + localname
localname = str(localname).lower()
self.unknown_endtag(localname)
def error(self, exc):
self.bozo = 1
self.exc = exc
def fatalError(self, exc):
self.error(exc)
raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
'img', 'input', 'isindex', 'link', 'meta', 'param']
def __init__(self, encoding):
self.encoding = encoding
if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
sgmllib.SGMLParser.__init__(self)
def reset(self):
self.pieces = []
sgmllib.SGMLParser.reset(self)
def _shorttag_replace(self, match):
tag = match.group(1)
if tag in self.elements_no_end_tag:
return '<' + tag + ' />'
else:
return '<' + tag + '></' + tag + '>'
def feed(self, data):
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
#data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
if self.encoding and type(data) == type(u''):
data = data.encode(self.encoding)
sgmllib.SGMLParser.feed(self, data)
def normalize_attrs(self, attrs):
# utility method to be called by descendants
attrs = [(k.lower(), v) for k, v in attrs]
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
return attrs
def unknown_starttag(self, tag, attrs):
# called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
uattrs = []
# thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
for key, value in attrs:
if type(value) != type(u''):
value = unicode(value, self.encoding)
uattrs.append((unicode(key, self.encoding), value))
strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
if tag in self.elements_no_end_tag:
self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
else:
self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
def unknown_endtag(self, tag):
# called for each end tag, e.g. for </pre>, tag will be 'pre'
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
self.pieces.append("</%(tag)s>" % locals())
def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
# Reconstruct the original character reference.
self.pieces.append('&#%(ref)s;' % locals())
def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
# Reconstruct the original entity reference.
self.pieces.append('&%(ref)s;' % locals())
def handle_data(self, text):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
self.pieces.append(text)
def handle_comment(self, text):
# called for each HTML comment, e.g. <!-- insert Javascript code here -->
# Reconstruct the original comment.
self.pieces.append('<!--%(text)s-->' % locals())
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
self.pieces.append('<?%(text)s>' % locals())
def handle_decl(self, text):
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
# "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
self.pieces.append('<!%(text)s>' % locals())
_new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = self._new_declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.handle_data(rawdata)
# self.updatepos(declstartpos, i)
return None, -1
def output(self):
'''Return processed HTML as a single string'''
return ''.join([str(p) for p in self.pieces])
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
def __init__(self, baseuri, baselang, encoding):
sgmllib.SGMLParser.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
def decodeEntities(self, element, data):
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
return data
class _RelativeURIResolver(_BaseHTMLProcessor):
relative_uris = [('a', 'href'),
('applet', 'codebase'),
('area', 'href'),
('blockquote', 'cite'),
('body', 'background'),
('del', 'cite'),
('form', 'action'),
('frame', 'longdesc'),
('frame', 'src'),
('iframe', 'longdesc'),
('iframe', 'src'),
('head', 'profile'),
('img', 'longdesc'),
('img', 'src'),
('img', 'usemap'),
('input', 'src'),
('input', 'usemap'),
('ins', 'cite'),
('link', 'href'),
('object', 'classid'),
('object', 'codebase'),
('object', 'data'),
('object', 'usemap'),
('q', 'cite'),
('script', 'src')]
def __init__(self, baseuri, encoding):
_BaseHTMLProcessor.__init__(self, encoding)
self.baseuri = baseuri
def resolveURI(self, uri):
return _urljoin(self.baseuri, uri)
def unknown_starttag(self, tag, attrs):
attrs = self.normalize_attrs(attrs)
attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding):
if _debug: sys.stderr.write('entering _resolveRelativeURIs\n')
p = _RelativeURIResolver(baseURI, encoding)
p.feed(htmlSource)
return p.output()
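# Illustrative example (hypothetical input): resolving relative links against
# a base URI:
#   _resolveRelativeURIs('<a href="page.html">x</a>', 'http://example.org/feed/', 'utf-8')
# returns '<a href="http://example.org/feed/page.html">x</a>'.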
class _HTMLSanitizer(_BaseHTMLProcessor):
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th',
'thead', 'tr', 'tt', 'u', 'ul', 'var']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
'usemap', 'valign', 'value', 'vspace', 'width']
unacceptable_elements_with_end_tag = ['script', 'applet']
def reset(self):
_BaseHTMLProcessor.reset(self)
self.unacceptablestack = 0
def unknown_starttag(self, tag, attrs):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
return
attrs = self.normalize_attrs(attrs)
attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def unknown_endtag(self, tag):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
_BaseHTMLProcessor.handle_data(self, text)
def _sanitizeHTML(htmlSource, encoding):
p = _HTMLSanitizer(encoding)
p.feed(htmlSource)
data = p.output()
if TIDY_MARKUP:
# loop through list of preferred Tidy interfaces looking for one that's installed,
# then set up a common _tidy function to wrap the interface-specific API.
_tidy = None
for tidy_interface in PREFERRED_TIDY_INTERFACES:
try:
if tidy_interface == "uTidy":
from tidy import parseString as _utidy
def _tidy(data, **kwargs):
return str(_utidy(data, **kwargs))
break
elif tidy_interface == "mxTidy":
from mx.Tidy import Tidy as _mxtidy
def _tidy(data, **kwargs):
nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
return data
break
except:
pass
if _tidy:
            utf8 = type(data) == type(u'')  # flag really means 'is unicode', not utf-8
if utf8:
data = data.encode('utf-8')
data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
if utf8:
data = unicode(data, 'utf-8')
if data.count('<body'):
data = data.split('<body', 1)[1]
if data.count('>'):
data = data.split('>', 1)[1]
if data.count('</body'):
data = data.split('</body', 1)[0]
data = data.strip().replace('\r\n', '\n')
return data
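# Illustrative example (hypothetical input, assuming no Tidy interface is
# installed):
#   _sanitizeHTML('<p style="x">hi<script>alert(1)</script></p>', 'utf-8')
# returns '<p>hi</p>': the style attribute is not in acceptable_attributes,
# and the script element's content is suppressed entirely.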
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
if ((code / 100) == 3) and (code != 304):
return self.http_error_302(req, fp, code, msg, headers)
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
infourl.status = code
return infourl
def http_error_302(self, req, fp, code, msg, headers):
if headers.dict.has_key('location'):
infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
else:
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
if not hasattr(infourl, 'status'):
infourl.status = code
return infourl
def http_error_301(self, req, fp, code, msg, headers):
if headers.dict.has_key('location'):
infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
else:
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
if not hasattr(infourl, 'status'):
infourl.status = code
return infourl
http_error_300 = http_error_302
http_error_303 = http_error_302
http_error_307 = http_error_302
def http_error_401(self, req, fp, code, msg, headers):
# Check if
# - server requires digest auth, AND
# - we tried (unsuccessfully) with basic auth, AND
# - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
# If all conditions hold, parse authentication information
# out of the Authorization header we sent the first time
# (for the username and password) and the WWW-Authenticate
# header the server sent back (for the realm) and retry
# the request with the appropriate digest auth headers instead.
# This evil genius hack has been brought to you by Aaron Swartz.
host = urlparse.urlparse(req.get_full_url())[1]
try:
assert sys.version.split()[0] >= '2.3.3'
assert base64 != None
user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
self.add_password(realm, host, user, passw)
retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
self.reset_retry_count()
return retry
except:
return self.http_error_default(req, fp, code, msg, headers)
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
"""URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the modified argument is supplied, it must be a tuple of 9 integers
as returned by gmtime() in the standard Python time module. This MUST
be in GMT (Greenwich Mean Time). The formatted date/time will be used
as the value of an If-Modified-Since request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
If the referrer argument is supplied, it will be used as the value of a
Referer[sic] request header.
If handlers is supplied, it is a list of handlers used to build a
urllib2 opener.
"""
if hasattr(url_file_stream_or_string, 'read'):
return url_file_stream_or_string
if url_file_stream_or_string == '-':
return sys.stdin
if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
if not agent:
agent = USER_AGENT
# test for inline user:password for basic auth
auth = None
if base64:
urltype, rest = urllib.splittype(url_file_stream_or_string)
realhost, rest = urllib.splithost(rest)
if realhost:
user_passwd, realhost = urllib.splituser(realhost)
if user_passwd:
url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
auth = base64.encodestring(user_passwd).strip()
# try to open with urllib2 (to use optional headers)
request = urllib2.Request(url_file_stream_or_string)
request.add_header('User-Agent', agent)
if etag:
request.add_header('If-None-Match', etag)
if modified:
# format into an RFC 1123-compliant timestamp. We can't use
# time.strftime() since the %a and %b directives can be affected
# by the current locale, but RFC 2616 states that dates must be
# in English.
short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
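            # e.g. modified=(2004, 2, 17, 1, 2, 3, 1, 48, 0) produces
            # 'If-Modified-Since: Tue, 17 Feb 2004 01:02:03 GMT'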
if referrer:
request.add_header('Referer', referrer)
if gzip and zlib:
request.add_header('Accept-encoding', 'gzip, deflate')
elif gzip:
request.add_header('Accept-encoding', 'gzip')
elif zlib:
request.add_header('Accept-encoding', 'deflate')
else:
request.add_header('Accept-encoding', '')
if auth:
request.add_header('Authorization', 'Basic %s' % auth)
if ACCEPT_HEADER:
request.add_header('Accept', ACCEPT_HEADER)
request.add_header('A-IM', 'feed') # RFC 3229 support
opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers))
opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
try:
return opener.open(request)
finally:
opener.close() # JohnD
# try to open with native open function (if url_file_stream_or_string is a filename)
try:
return open(url_file_stream_or_string)
except:
pass
# treat url_file_stream_or_string as string
return _StringIO(str(url_file_stream_or_string))
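# Illustrative example (hypothetical input): raw feed data passed as a string
# falls through to the final branch, e.g.
#   f = _open_resource('<rss version="2.0"/>', None, None, None, None, [])
#   f.read() gives back '<rss version="2.0"/>'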
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
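# Illustrative example (hypothetical handler name): a handler takes a date
# string and returns a 9-tuple, or None to let the next handler try, e.g.
#   def _parse_date_epoch(dateString):
#       if dateString.isdigit():
#           return time.gmtime(int(dateString))
#   registerDateHandler(_parse_date_epoch)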
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO',
'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
'-YY-?MM', '-OOO', '-YY',
'--MM-?DD', '--MM',
'---DD',
'CC', '']
_iso8601_re = [
tmpl.replace(
'YYYY', r'(?P<year>\d{4})').replace(
'YY', r'(?P<year>\d\d)').replace(
'MM', r'(?P<month>[01]\d)').replace(
'DD', r'(?P<day>[0123]\d)').replace(
'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
'CC', r'(?P<century>\d\d$)')
+ r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+ r'(:(?P<second>\d{2}))?'
+ r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
for tmpl in _iso8601_tmpl]
del tmpl
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex
def _parse_date_iso8601(dateString):
'''Parse a variety of ISO-8601-compatible formats like 20040105'''
m = None
for _iso8601_match in _iso8601_matches:
m = _iso8601_match(dateString)
if m: break
if not m: return
if m.span() == (0, 0): return
params = m.groupdict()
ordinal = params.get('ordinal', 0)
if ordinal:
ordinal = int(ordinal)
else:
ordinal = 0
year = params.get('year', '--')
if not year or year == '--':
year = time.gmtime()[0]
elif len(year) == 2:
# ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
year = 100 * int(time.gmtime()[0] / 100) + int(year)
else:
year = int(year)
month = params.get('month', '-')
if not month or month == '-':
# ordinals are NOT normalized by mktime, we simulate them
# by setting month=1, day=ordinal
if ordinal:
month = 1
else:
month = time.gmtime()[1]
month = int(month)
day = params.get('day', 0)
if not day:
# see above
if ordinal:
day = ordinal
elif params.get('century', 0) or \
params.get('year', 0) or params.get('month', 0):
day = 1
else:
day = time.gmtime()[2]
else:
day = int(day)
# special case of the century - is the first year of the 21st century
# 2000 or 2001 ? The debate goes on...
if 'century' in params.keys():
year = (int(params['century']) - 1) * 100 + 1
# in ISO 8601 most fields are optional
for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
if not params.get(field, None):
params[field] = 0
hour = int(params.get('hour', 0))
minute = int(params.get('minute', 0))
second = int(params.get('second', 0))
# weekday is normalized by mktime(), we can ignore it
weekday = 0
# daylight savings is complex, but not needed for feedparser's purposes
# as time zones, if specified, include mention of whether it is active
    # (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent,
    # and most implementations have DST bugs
daylight_savings_flag = 0
tm = [year, month, day, hour, minute, second, weekday,
ordinal, daylight_savings_flag]
# ISO 8601 time zone adjustments
tz = params.get('tz')
if tz and tz != 'Z':
if tz[0] == '-':
tm[3] += int(params.get('tzhour', 0))
tm[4] += int(params.get('tzmin', 0))
elif tz[0] == '+':
tm[3] -= int(params.get('tzhour', 0))
tm[4] -= int(params.get('tzmin', 0))
else:
return None
# Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
# which is guaranteed to normalize d/m/y/h/m/s.
# Many implementations have bugs, but we'll pretend they don't.
return time.localtime(time.mktime(tm))
registerDateHandler(_parse_date_iso8601)
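# Illustrative example: _parse_date_iso8601('20040105') and
# _parse_date_iso8601('2004-01-05') both describe January 5, 2004; the
# templates above are tried in order, most specific first.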
# 8-bit date handling routines written by ytrewq1.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
_korean_onblog_date_re = \
re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
(_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
(_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
'''Parse a string according to the OnBlog 8-bit date format'''
m = _korean_onblog_date_re.match(dateString)
if not m: return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
'''Parse a string according to the Nate 8-bit date format'''
m = _korean_nate_date_re.match(dateString)
if not m: return
hour = int(m.group(5))
ampm = m.group(4)
if (ampm == _korean_pm):
hour += 12
hour = str(hour)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
_mssql_date_re = \
re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
'''Parse a string according to the MS SQL date format'''
m = _mssql_date_re.match(dateString)
if not m: return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)
# Unicode strings for Greek date strings
_greek_months = \
{ \
u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = \
{ \
u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
}
_greek_date_format_re = \
re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
'''Parse a string according to a Greek 8-bit date format.'''
m = _greek_date_format_re.match(dateString)
if not m: return
try:
wday = _greek_wdays[m.group(1)]
month = _greek_months[m.group(3)]
except:
return
rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
{'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
'zonediff': m.group(8)}
if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
u'febru\u00e1ri': u'02', # e1 in iso-8859-2
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
u'm\u00e1ujus': u'05', # e1 in iso-8859-2
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
_hungarian_date_format_re = \
re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
'''Parse a string according to a Hungarian 8-bit date format.'''
m = _hungarian_date_format_re.match(dateString)
if not m: return
try:
month = _hungarian_months[m.group(2)]
day = m.group(3)
if len(day) == 1:
day = '0' + day
hour = m.group(4)
if len(hour) == 1:
hour = '0' + hour
except:
return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
{'year': m.group(1), 'month': month, 'day': day,\
'hour': hour, 'minute': m.group(5),\
'zonediff': m.group(6)}
if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
def __extract_date(m):
year = int(m.group('year'))
if year < 100:
year = 100 * int(time.gmtime()[0] / 100) + int(year)
if year < 1000:
return 0, 0, 0
julian = m.group('julian')
if julian:
julian = int(julian)
month = julian / 30 + 1
day = julian % 30 + 1
jday = None
while jday != julian:
t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
jday = time.gmtime(t)[-2]
diff = abs(jday - julian)
if jday > julian:
if diff < day:
day = day - diff
else:
month = month - 1
day = 31
elif jday < julian:
if day + diff < 28:
day = day + diff
else:
month = month + 1
return year, month, day
month = m.group('month')
day = 1
if month is None:
month = 1
else:
month = int(month)
day = m.group('day')
if day:
day = int(day)
else:
day = 1
return year, month, day
def __extract_time(m):
if not m:
return 0, 0, 0
hours = m.group('hours')
if not hours:
return 0, 0, 0
hours = int(hours)
minutes = int(m.group('minutes'))
seconds = m.group('seconds')
if seconds:
seconds = int(seconds)
else:
seconds = 0
return hours, minutes, seconds
def __extract_tzd(m):
'''Return the Time Zone Designator as an offset in seconds from UTC.'''
if not m:
return 0
tzd = m.group('tzd')
if not tzd:
return 0
if tzd == 'Z':
return 0
hours = int(m.group('tzdhours'))
minutes = m.group('tzdminutes')
if minutes:
minutes = int(minutes)
else:
minutes = 0
offset = (hours*60 + minutes) * 60
if tzd[0] == '+':
return -offset
return offset
__date_re = ('(?P<year>\d\d\d\d)'
'(?:(?P<dsep>-|)'
'(?:(?P<julian>\d\d\d)'
'|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
__tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
__tzd_rx = re.compile(__tzd_re)
__time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
'(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
+ __tzd_re)
__datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
__datetime_rx = re.compile(__datetime_re)
m = __datetime_rx.match(dateString)
if (m is None) or (m.group() != dateString): return
gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
if gmt[0] == 0: return
return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
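# Illustrative example: _parse_date_w3dtf('2003-12-31T10:14:55Z') yields the
# 9-tuple for 10:14:55 GMT on December 31, 2003; with a '-08:00' time zone
# designator the same clock time would map to 18:14:55 GMT.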
def _parse_date_rfc822(dateString):
'''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
data = dateString.split()
if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
del data[0]
if len(data) == 4:
s = data[3]
i = s.find('+')
if i > 0:
data[3:] = [s[:i], s[i+1:]]
else:
data.append('')
dateString = " ".join(data)
if len(data) < 5:
dateString += ' 00:00:00 GMT'
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
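# Illustrative example: _parse_date_rfc822('Thu, 01 Jan 2004 19:48:21 GMT')
# yields the corresponding 9-tuple in GMT; a date without a time component
# is padded out to midnight GMT before parsing.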
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
if not date9tuple: continue
if len(date9tuple) != 9:
if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
raise ValueError
map(int, date9tuple)
return date9tuple
except Exception, e:
if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
pass
return None
def _getCharacterEncoding(http_headers, xml_data):
'''Get the character encoding of the XML document
http_headers is a dictionary
xml_data is a raw string (not Unicode)
This is so much trickier than it sounds, it's not even funny.
According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
is application/xml, application/*+xml,
application/xml-external-parsed-entity, or application/xml-dtd,
the encoding given in the charset parameter of the HTTP Content-Type
takes precedence over the encoding given in the XML prefix within the
document, and defaults to 'utf-8' if neither are specified. But, if
the HTTP Content-Type is text/xml, text/*+xml, or
text/xml-external-parsed-entity, the encoding given in the XML prefix
within the document is ALWAYS IGNORED and only the encoding given in
the charset parameter of the HTTP Content-Type header should be
respected, and it defaults to 'us-ascii' if not specified.
Furthermore, discussion on the atom-syntax mailing list with the
author of RFC 3023 leads me to the conclusion that any document
served with a Content-Type of text/* and no charset parameter
must be treated as us-ascii. (We now do this.) And also that it
must always be flagged as non-well-formed. (We now do this too.)
If Content-Type is unspecified (input was local file or non-HTTP source)
or unrecognized (server just got it totally wrong), then go by the
encoding given in the XML prefix of the document and default to
'iso-8859-1' as per the HTTP specification (RFC 2616).
Then, assuming we didn't find a character encoding in the HTTP headers
(and the HTTP Content-type allowed us to look in the body), we need
to sniff the first few bytes of the XML data and try to determine
whether the encoding is ASCII-compatible. Section F of the XML
specification shows the way here:
http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
If the sniffed encoding is not ASCII-compatible, we need to make it
ASCII compatible so that we can sniff further into the XML declaration
to find the encoding attribute, which will tell us the true encoding.
Of course, none of this guarantees that we will be able to parse the
feed in the declared character encoding (assuming it was declared
correctly, which many are not). CJKCodecs and iconv_codec help a lot;
you should definitely install them if you can.
http://cjkpython.i18n.org/
'''
def _parseHTTPContentType(content_type):
'''takes HTTP Content-Type header and returns (content type, charset)
If no charset is specified, returns (content type, '')
If no content type is specified, returns ('', '')
Both return parameters are guaranteed to be lowercase strings
'''
content_type = content_type or ''
content_type, params = cgi.parse_header(content_type)
return content_type, params.get('charset', '').replace("'", '')
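        # e.g. _parseHTTPContentType('text/xml; charset=utf-8') returns
        # ('text/xml', 'utf-8'); _parseHTTPContentType(None) returns ('', '')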
sniffed_xml_encoding = ''
xml_encoding = ''
true_encoding = ''
http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type'))
# Must sniff for non-ASCII-compatible character encodings before
# searching for XML declaration. This heuristic is defined in
# section F of the XML specification:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = _ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
# ASCII-compatible
pass
xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
except:
xml_encoding_match = None
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].lower()
if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
acceptable_content_type = 0
application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
if (http_content_type in application_content_types) or \
(http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
acceptable_content_type = 1
true_encoding = http_encoding or xml_encoding or 'utf-8'
elif (http_content_type in text_content_types) or \
(http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
acceptable_content_type = 1
true_encoding = http_encoding or 'us-ascii'
elif http_content_type.startswith('text/'):
true_encoding = http_encoding or 'us-ascii'
elif http_headers and (not http_headers.has_key('content-type')):
true_encoding = xml_encoding or 'iso-8859-1'
else:
true_encoding = xml_encoding or 'utf-8'
return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
def _toUTF8(data, encoding):
'''Changes an XML data stream on the fly to specify a new encoding
data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
encoding is a string recognized by encodings.aliases
'''
if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-16be':
sys.stderr.write('trying utf-16be instead\n')
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-16le':
sys.stderr.write('trying utf-16le instead\n')
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == '\xef\xbb\xbf':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-8':
sys.stderr.write('trying utf-8 instead\n')
encoding = 'utf-8'
data = data[3:]
elif data[:4] == '\x00\x00\xfe\xff':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-32be':
sys.stderr.write('trying utf-32be instead\n')
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == '\xff\xfe\x00\x00':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-32le':
sys.stderr.write('trying utf-32le instead\n')
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding)
if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
declmatch = re.compile('^<\?xml[^>]*?>')
newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
if declmatch.search(newdata):
newdata = declmatch.sub(newdecl, newdata)
else:
newdata = newdecl + u'\n' + newdata
return newdata.encode('utf-8')
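# Illustrative example (hypothetical input):
#   _toUTF8("<?xml version='1.0' encoding='iso-8859-1'?><a/>", 'iso-8859-1')
# returns the same document as UTF-8 bytes with the declaration rewritten
# to encoding='utf-8'.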
def _stripDoctype(data):
'''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document, minus the DOCTYPE
'''
entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE)
data = entity_pattern.sub('', data)
doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE)
doctype_results = doctype_pattern.findall(data)
doctype = doctype_results and doctype_results[0] or ''
if doctype.lower().count('netscape'):
version = 'rss091n'
else:
version = None
data = doctype_pattern.sub('', data)
return version, data
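# Illustrative example (hypothetical input):
#   _stripDoctype('<!DOCTYPE rss SYSTEM "...netscape...rss-0.91.dtd"><rss/>')
# returns ('rss091n', '<rss/>'), since a DOCTYPE mentioning Netscape marks
# the feed as RSS 0.91.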
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
'''Parse a feed from a URL, file, stream, or string'''
result = FeedParserDict()
result['feed'] = FeedParserDict()
result['entries'] = []
if _XML_AVAILABLE:
result['bozo'] = 0
if type(handlers) == types.InstanceType:
handlers = [handlers]
try:
f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
data = f.read()
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
f = None
# if feed is gzip-compressed, decompress it
if f and data and hasattr(f, 'headers'):
if gzip and f.headers.get('content-encoding', '') == 'gzip':
try:
data = gzip.GzipFile(fileobj=_StringIO(data)).read()
except Exception, e:
# Some feeds claim to be gzipped but they're not, so
# we get garbage. Ideally, we should re-request the
# feed without the 'Accept-encoding: gzip' header,
# but we don't.
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
elif zlib and f.headers.get('content-encoding', '') == 'deflate':
try:
data = zlib.decompress(data, -zlib.MAX_WBITS)
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
# save HTTP headers
if hasattr(f, 'info'):
info = f.info()
result['etag'] = info.getheader('ETag')
last_modified = info.getheader('Last-Modified')
if last_modified:
result['modified'] = _parse_date(last_modified)
if hasattr(f, 'url'):
result['href'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
if hasattr(f, 'headers'):
result['headers'] = f.headers.dict
if hasattr(f, 'close'):
f.close()
# there are four encodings to keep track of:
# - http_encoding is the encoding declared in the Content-Type HTTP header
# - xml_encoding is the encoding declared in the <?xml declaration
# - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
# - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
http_headers = result.get('headers', {})
result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
_getCharacterEncoding(http_headers, data)
if http_headers and (not acceptable_content_type):
if http_headers.has_key('content-type'):
bozo_message = '%s is not an XML media type' % http_headers['content-type']
else:
bozo_message = 'no Content-type specified'
result['bozo'] = 1
result['bozo_exception'] = NonXMLContentType(bozo_message)
result['version'], data = _stripDoctype(data)
baseuri = http_headers.get('content-location', result.get('href'))
baselang = http_headers.get('content-language', None)
# if server sent 304, we're done
if result.get('status', 0) == 304:
result['version'] = ''
result['debug_message'] = 'The feed has not changed since you last checked, ' + \
'so the server sent no data. This is a feature, not a bug!'
return result
# if there was a problem downloading, we're done
if not data:
return result
# determine character encoding
use_strict_parser = 0
known_encoding = 0
tried_encodings = []
# try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
if not proposed_encoding: continue
if proposed_encoding in tried_encodings: continue
tried_encodings.append(proposed_encoding)
try:
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
break
except:
pass
# if no luck and we have auto-detection library, try that
if (not known_encoding) and chardet:
try:
proposed_encoding = chardet.detect(data)['encoding']
if proposed_encoding and (proposed_encoding not in tried_encodings):
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck and we haven't tried utf-8 yet, try that
if (not known_encoding) and ('utf-8' not in tried_encodings):
try:
proposed_encoding = 'utf-8'
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck and we haven't tried windows-1252 yet, try that
if (not known_encoding) and ('windows-1252' not in tried_encodings):
try:
proposed_encoding = 'windows-1252'
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck, give up
if not known_encoding:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingUnknown( \
'document encoding unknown, I tried ' + \
'%s, %s, utf-8, and windows-1252 but nothing worked' % \
(result['encoding'], xml_encoding))
result['encoding'] = ''
elif proposed_encoding != result['encoding']:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingOverride( \
            'document declared as %s, but parsed as %s' % \
(result['encoding'], proposed_encoding))
result['encoding'] = proposed_encoding
if not _XML_AVAILABLE:
use_strict_parser = 0
if use_strict_parser:
# initialize the SAX parser
feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
saxparser.setContentHandler(feedparser)
saxparser.setErrorHandler(feedparser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(_StringIO(data))
if hasattr(saxparser, '_ns_stack'):
# work around bug in built-in SAX parser (doesn't recognize xml: namespace)
# PyXML doesn't have this problem, and it doesn't have _ns_stack either
saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
try:
saxparser.parse(source)
except Exception, e:
if _debug:
import traceback
traceback.print_stack()
traceback.print_exc()
sys.stderr.write('xml parsing failed\n')
result['bozo'] = 1
result['bozo_exception'] = feedparser.exc or e
use_strict_parser = 0
if not use_strict_parser:
feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '')
feedparser.feed(data)
result['feed'] = feedparser.feeddata
result['entries'] = feedparser.entries
result['version'] = result['version'] or feedparser.version
result['namespaces'] = feedparser.namespacesInUse
return result
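# Illustrative usage (hypothetical URL):
#   import feedparser
#   d = feedparser.parse('http://example.org/atom.xml')
#   print d['feed'].get('title'), len(d['entries']), d.get('bozo')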
if __name__ == '__main__':
if not sys.argv[1:]:
print __doc__
sys.exit(0)
else:
urls = sys.argv[1:]
zopeCompatibilityHack()
from pprint import pprint
for url in urls:
result = parse(url)
pprint(result)
#REVISION HISTORY
#1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements,
# added Simon Fell's test suite
#1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections
#2.0 - 10/19/2002
# JD - use inchannel to watch out for image and textinput elements which can
# also contain title, link, and description elements
# JD - check for isPermaLink='false' attribute on guid elements
# JD - replaced openAnything with open_resource supporting ETag and
# If-Modified-Since request headers
# JD - parse now accepts etag, modified, agent, and referrer optional
# arguments
# JD - modified parse to return a dictionary instead of a tuple so that any
# etag or modified information can be returned and cached by the caller
#2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything
# because of etag/modified, return the old etag/modified to the caller to
# indicate why nothing is being returned
#2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise its
# useless. Fixes the problem JD was addressing by adding it.
#2.1 - 11/14/2002 - MAP - added gzip support
#2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent.
# start_admingeneratoragent is an example of how to handle elements with
# only attributes, no content.
#2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify);
# also, make sure we send the User-Agent even if urllib2 isn't available.
# Match any variation of backend.userland.com/rss namespace.
#2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is.
#2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's
# snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed
# project name
#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree);
# removed unnecessary urllib code -- urllib2 should always be available anyway;
# return actual url, status, and full HTTP headers (as result['url'],
# result['status'], and result['headers']) if parsing a remote feed over HTTP --
# this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>;
# added the latest namespace-of-the-week for RSS 2.0
#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom
# User-Agent (otherwise urllib2 sends two, which confuses some servers)
#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for
# inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds
#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or
# textInput, and also to return the character encoding (if specified)
#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking
# nested divs within content (JohnD); fixed missing sys import (JohanS);
# fixed regular expression to capture XML character encoding (Andrei);
# added support for Atom 0.3-style links; fixed bug with textInput tracking;
# added support for cloud (MartijnP); added support for multiple
# category/dc:subject (MartijnP); normalize content model: 'description' gets
# description (which can come from description, summary, or full content if no
# description), 'content' gets dict of base/language/type/value (which can come
# from content:encoded, xhtml:body, content, or fullitem);
# fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang
# tracking; fixed bug tracking unknown tags; fixed bug tracking content when
# <content> element is not in default namespace (like Pocketsoap feed);
# resolve relative URLs in link, guid, docs, url, comments, wfw:comment,
# wfw:commentRSS; resolve relative URLs within embedded HTML markup in
# description, xhtml:body, content, content:encoded, title, subtitle,
# summary, info, tagline, and copyright; added support for pingback and
# trackback namespaces
#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback
# namespaces, as opposed to 2.6 when I said I did but didn't really;
# sanitize HTML markup within some elements; added mxTidy support (if
# installed) to tidy HTML markup within some elements; fixed indentation
# bug in _parse_date (FazalM); use socket.setdefaulttimeout if available
# (FazalM); universal date parsing and normalization (FazalM): 'created', modified',
# 'issued' are parsed into 9-tuple date format and stored in 'created_parsed',
# 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified'
# and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa
#2.7.1 - 1/9/2004 - MAP - fixed bug handling &quot; and &apos;. fixed memory
# leak not closing url opener (JohnD); added dc:publisher support (MarekK);
# added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK)
#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in
# encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL);
# fixed relative URI processing for guid (skadz); added ICBM support; added
# base64 support
#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many
# blogspot.com sites); added _debug variable
#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing
#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available);
# added several new supported namespaces; fixed bug tracking naked markup in
# description; added support for enclosure; added support for source; re-added
# support for cloud which got dropped somehow; added support for expirationDate
#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking
# xml:base URI, one for documents that don't define one explicitly and one for
# documents that define an outer and an inner xml:base that goes out of scope
# before the end of the document
#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level
#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version']
# will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized;
# added support for creativeCommons:license and cc:license; added support for
# full Atom content model in title, tagline, info, copyright, summary; fixed bug
# with gzip encoding (not always telling server we support it when we do)
#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail
# (dictionary of 'name', 'url', 'email'); map author to author_detail if author
# contains name + email address
#3.0b8 - 1/28/2004 - MAP - added support for contributor
#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added
# support for summary
#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from
# xml.util.iso8601
#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain
# dangerous markup; fiddled with decodeEntities (not right); liberalized
# date parsing even further
#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right);
# added support to Atom 0.2 subtitle; added support for Atom content model
# in copyright; better sanitizing of dangerous HTML elements with end tags
# (script, frameset)
#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img,
# etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />)
#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under
# Python 2.1
#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS;
# fixed bug capturing author and contributor URL; fixed bug resolving relative
# links in author and contributor URL; fixed bug resolving relative links in
# generator URL; added support for recognizing RSS 1.0; passed Simon Fell's
# namespace tests, and included them permanently in the test suite with his
# permission; fixed namespace handling under Python 2.1
#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15)
#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023
#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei);
# use libxml2 (if available)
#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author
# name was in parentheses; removed ultra-problematic mxTidy support; patch to
# workaround crash in PyXML/expat when encountering invalid entities
# (MarkMoraes); support for textinput/textInput
#3.0b20 - 4/7/2004 - MAP - added CDF support
#3.0b21 - 4/14/2004 - MAP - added Hot RSS support
#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in
# results dict; changed results dict to allow getting values with results.key
# as well as results[key]; work around embedded illformed HTML with half
# a DOCTYPE; work around malformed Content-Type header; if character encoding
# is wrong, try several common ones before falling back to regexes (if this
# works, bozo_exception is set to CharacterEncodingOverride); fixed character
# encoding issues in BaseHTMLProcessor by tracking encoding and converting
# from Unicode to raw strings before feeding data to sgmllib.SGMLParser;
# convert each value in results to Unicode (if possible), even if using
# regex-based parsing
#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain
# high-bit characters in attributes in embedded HTML in description (thanks
# Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in
# FeedParserDict; tweaked FeedParserDict.has_key to return True if asking
# about a mapped key
#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and
# results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could
# cause the same encoding to be tried twice (even if it failed the first time);
# fixed DOCTYPE stripping when DOCTYPE contained entity declarations;
# better textinput and image tracking in illformed RSS 1.0 feeds
#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed
# my blink tag tests
#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that
# failed to parse utf-16 encoded feeds; made source into a FeedParserDict;
# duplicate admin:generatorAgent/@rdf:resource in generator_detail.url;
# added support for image; refactored parse() fallback logic to try other
# encodings if SAX parsing fails (previously it would only try other encodings
# if re-encoding failed); remove unichr madness in normalize_attrs now that
# we're properly tracking encoding in and out of BaseHTMLProcessor; set
# feed.language from root-level xml:lang; set entry.id from rdf:about;
# send Accept header
#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between
# iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are
# windows-1252); fixed regression that could cause the same encoding to be
# tried twice (even if it failed the first time)
#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types;
# recover from malformed content-type header parameter with no equals sign
# ('text/xml; charset:iso-8859-1')
#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities
# to Unicode equivalents in illformed feeds (aaronsw); added and
# passed tests for converting character entities to Unicode equivalents
# in illformed feeds (aaronsw); test for valid parsers when setting
# XML_AVAILABLE; make version and encoding available when server returns
# a 304; add handlers parameter to pass arbitrary urllib2 handlers (like
# digest auth or proxy support); add code to parse username/password
# out of url and send as basic authentication; expose downloading-related
# exceptions in bozo_exception (aaronsw); added __contains__ method to
# FeedParserDict (aaronsw); added publisher_detail (aaronsw)
#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always
# convert feed to UTF-8 before passing to XML parser; completely revamped
# logic for determining character encoding and attempting XML parsing
# (much faster); increased default timeout to 20 seconds; test for presence
# of Location header on redirects; added tests for many alternate character
# encodings; support various EBCDIC encodings; support UTF-16BE and
# UTF-16LE with or without a BOM; support UTF-8 with a BOM; support
# UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no
# XML parsers are available; added support for 'Content-encoding: deflate';
# send blank 'Accept-encoding: ' header if neither gzip nor zlib modules
# are available
#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure
# problem tracking xml:base and xml:lang if element declares it, child
# doesn't, first grandchild redeclares it, and second grandchild doesn't;
# refactored date parsing; defined public registerDateHandler so callers
# can add support for additional date formats at runtime; added support
# for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added
# zopeCompatibilityHack() which turns FeedParserDict into a regular
# dictionary, required for Zope compatibility, and also makes command-
# line debugging easier because pprint module formats real dictionaries
# better than dictionary-like objects; added NonXMLContentType exception,
# which is stored in bozo_exception when a feed is served with a non-XML
# media type such as 'text/plain'; respect Content-Language as default
# language if not xml:lang is present; cloud dict is now FeedParserDict;
# generator dict is now FeedParserDict; better tracking of xml:lang,
# including support for xml:lang='' to unset the current language;
# recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default
# namespace; don't overwrite final status on redirects (scenarios:
# redirecting to a URL that returns 304, redirecting to a URL that
# redirects to another URL with a different type of redirect); add
# support for HTTP 303 redirects
#4.0 - MAP - support for relative URIs in xml:base attribute; fixed
# encoding issue with mxTidy (phopkins); preliminary support for RFC 3229;
# support for Atom 1.0; support for iTunes extensions; new 'tags' for
# categories/keywords/etc. as array of dict
# {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0
# terminology; parse RFC 822-style dates with no time; lots of other
# bug fixes
#4.1 - MAP - removed socket timeout; added support for chardet library
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ingadhoc/odoo-help
|
document_page_adhoc/web/widgets/rss/feedparser.py
|
Python
|
agpl-3.0
| 122,515
|
[
"NetCDF",
"VisIt"
] |
28cdd6e1960bd39474ad127b129908e11e6f5b40fcf62b5bc6bf27597fc8b308
|
"""
cclib (http://cclib.sf.net) is (c) 2006, the cclib development team
and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html).
"""
__revision__ = "$Revision: 733 $"
import logging
import numpy
from calculationmethod import Method
class Population(Method):
"""A base class for all population-type methods."""
def __init__(self, data, progress=None, \
loglevel=logging.INFO, logname="Log"):
# Call the __init__ method of the superclass.
super(Population, self).__init__(data, progress, loglevel, logname)
self.fragresults = None
def __str__(self):
"""Return a string representation of the object."""
return "Population"
def __repr__(self):
"""Return a representation of the object."""
return "Population"
def partition(self, indices=None):
if not hasattr(self, "aoresults"):
self.calculate()
if not indices:
# Build list of groups of orbitals in each atom for atomresults.
if hasattr(self.data, "aonames"):
names = self.data.aonames
elif hasattr(self.data, "fonames"):
names = self.data.fonames
atoms = []
indices = []
name = names[0].split('_')[0]
atoms.append(name)
indices.append([0])
for i in range(1, len(names)):
name = names[i].split('_')[0]
try:
index = atoms.index(name)
except ValueError: #not found in atom list
atoms.append(name)
indices.append([i])
else:
indices[index].append(i)
natoms = len(indices)
nmocoeffs = len(self.aoresults[0])
# Build results numpy array[3].
alpha = len(self.aoresults[0])
results = []
results.append(numpy.zeros([alpha, natoms], "d"))
if len(self.aoresults) == 2:
beta = len(self.aoresults[1])
results.append(numpy.zeros([beta, natoms], "d"))
# For each spin, splice numpy array at ao index,
# and add to correct result row.
for spin in range(len(results)):
for i in range(natoms): # Number of groups.
for j in range(len(indices[i])): # For each group.
temp = self.aoresults[spin][:, indices[i][j]]
results[spin][:, i] = numpy.add(results[spin][:, i], temp)
self.logger.info("Saving partitioned results in fragresults: [array[2]]")
self.fragresults = results
return True
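# Illustrative usage (hypothetical subclass name; Population itself defines
# no calculate(), so a concrete subclass must be used):
#   mpa = SomePopulationMethod(parseddata)
#   mpa.calculate()
#   mpa.partition()
#   mpa.fragresults[0].shape == (nmocoeffs, natoms)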
if __name__ == "__main__":
import doctest, population
doctest.testmod(population, verbose=False)
|
comocheng/RMG-Py
|
external/cclib/method/population.py
|
Python
|
mit
| 2,808
|
[
"cclib"
] |
af89d1ecdc96643bb40c2fb517b0dae9aa865325d9dae561e5681684299ff334
|