| text stringlengths 12–1.05M | repo_name stringlengths 5–86 | path stringlengths 4–191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12–1.05M | keyword listlengths 1–23 | text_hash stringlengths 64 |
|---|---|---|---|---|---|---|---|
import argparse
import numpy as np
from datetime import datetime
import sklearn.gaussian_process as gp
import matplotlib.pyplot as plot
import parser as gcparser
import os
import os.path
import running
def main(indir, outdir):
    """
    Main driver method.
    """
    # Generate a list of all the files.
    listing = filelisting(indir)

    # Parse each file.
    timestamps = []
    distances = []
    splits = []
    i = 0
    for f in listing:
        p = gcparser.GCFileParser(f)
        t, d, s = p.parse()
        if t is None and d is None and s is None: continue
        # Append the data.
        timestamps.append(t)
        distances.append(d)
        splits.append(s)
        i += 1
        print '.'

    # Sort the data.
    timestamps = np.array(timestamps)
    numRuns = np.size(timestamps)
    sortInd = np.argsort(timestamps)

    # Loop through the sorted arrays, generating a graph.
    X = np.atleast_2d(np.linspace(0, numRuns, numRuns, endpoint = False)).T
    y = np.zeros(np.size(timestamps))
    dy = np.zeros(np.size(y))
    for i in range(0, np.size(sortInd)):
        ind = sortInd[i]
        d = running.metersToMiles(distances[ind])
        s = running.secondsToMinutes(splits[ind])
        y[i] = running.averagePace(d, s)
        dy[i] = np.std(s)
    dy += 0.01
    process = gp.GaussianProcess(corr = 'squared_exponential',
                                 nugget = (dy / y) ** 2, theta0 = 1e-1, thetaL = 1e-3,
                                 thetaU = 1, random_start = 100)
    process.fit(X, y)

    # Set up a prediction.
    x = np.atleast_2d(np.linspace(0, numRuns, numRuns * 10)).T
    y_pred, MSE = process.predict(x, eval_MSE = True)
    sigma = np.sqrt(MSE)

    # Plot the prediction and the 95% confidence interval.
    plot.plot(X.ravel(), y, c = 'r', marker = '+', ls = 'None', markersize = 10, label = 'Runs')
    plot.plot(x, y_pred, 'b-', label = 'Prediction')
    plot.fill(np.concatenate([x, x[::-1]]),
              np.concatenate([y_pred - 1.96 * sigma,
                              (y_pred + 1.96 * sigma)[::-1]]),
              alpha = 0.5, fc = 'b', ec = 'None', label = '95% confidence')
    plot.ylabel('Average Pace (minutes)')
    locs, labels = plot.xticks()
    locs = locs[np.where(locs < numRuns)]
    newlabels = [datetime.fromtimestamp(timestamps[loc]).strftime("%Y/%m/%d") for loc in locs]
    plot.xticks(locs, newlabels)
    plot.legend(loc = 0)
    plot.show()

def filelisting(directory, suffix = 'tcx'):
    """
    Generates a list of all the files in the directory.
    """
    files = []
    for f in os.listdir(directory):
        fullpath = os.path.join(directory, f)
        if os.path.isfile(fullpath) and f.endswith(suffix):
            files.append(fullpath)
    return files

if __name__ == "__main__":
    print 'Guinea pigs, that is!\n'
    print " , , "
    print " \ | \ / / / /"
    print " / o ,) \\"
    print " C / / \\"
    print " \_ ( /"
    print " mm --- mooo-\n"
    parser = argparse.ArgumentParser(description = 'Gaussian Processes on GC',
                                     epilog = 'guinea pig = gp',
                                     add_help = 'How to use',
                                     prog = 'python gp.py -i <input dir> -o <output dir>')
    parser.add_argument('-i', '--input', required = True,
                        help = 'Input directory, contains lots of .tcx files.')
    parser.add_argument('-o', '--output', required = False,
                        default = None, help = 'Output directory.')
    args = vars(parser.parse_args())

    main(args['input'], args['output'])
| magsol/garmin | gp.py | Python | mit | 3,648 | ["Gaussian"] | 93fc88e11f23e3fd3d181a407bf98196987a67aac37ef4d3f7b7252dbd1f4439 |
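The `GaussianProcess` estimator used above (with its `corr`, `nugget` and `theta0` arguments) is the legacy scikit-learn interface that was later removed. As a point of comparison, the following is a minimal sketch of the same per-run-noise regression with the current `GaussianProcessRegressor` API; the run indices, paces and noise estimates are synthetic stand-ins, not parsed Garmin data.

# Minimal sketch using the current scikit-learn GP API; all data below are
# synthetic stand-ins for the parsed Garmin runs.
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel

X = np.arange(20, dtype=float).reshape(-1, 1)      # run index
y = 8.0 + 0.5 * np.sin(X.ravel() / 3.0)            # average pace per run
dy = 0.1 + 0.05 * np.random.rand(len(y))           # per-run noise estimate

kernel = ConstantKernel(1.0) * RBF(length_scale=5.0)
gpr = GaussianProcessRegressor(kernel=kernel, alpha=dy ** 2, n_restarts_optimizer=5)
gpr.fit(X, y)

x_new = np.linspace(0, 19, 200).reshape(-1, 1)
y_pred, sigma = gpr.predict(x_new, return_std=True)   # mean and std for a 95% band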
import sugartensor as tf
__author__ = 'namju.kim@kakaobrain.com'
# set log level to debug
tf.sg_verbosity(10)
#
# hyper parameters
#
batch_size = 32 # batch size
num_dim = 50 # latent dimension
#
# inputs
#
# MNIST input tensor ( with QueueRunner )
data = tf.sg_data.Mnist(batch_size=32)
# input images
x = data.train.image
#
# Computational graph
#
# encoder network
with tf.sg_context(name='encoder', size=4, stride=2, act='relu'):
    mu = (x
          .sg_conv(dim=64)
          .sg_conv(dim=128)
          .sg_flatten()
          .sg_dense(dim=1024)
          .sg_dense(dim=num_dim, act='linear'))
# re-parameterization trick with random gaussian
z = mu + tf.random_normal(mu.get_shape())
# decoder network
with tf.sg_context(name='decoder', size=4, stride=2, act='relu'):
    xx = (z
          .sg_dense(dim=1024)
          .sg_dense(dim=7*7*128)
          .sg_reshape(shape=(-1, 7, 7, 128))
          .sg_upconv(dim=64)
          .sg_upconv(dim=1, act='sigmoid'))
# add image summary
tf.sg_summary_image(x, name='origin')
tf.sg_summary_image(xx, name='recon')
# loss
loss_recon = xx.sg_mse(target=x, name='recon').sg_mean(axis=[1, 2, 3])
loss_kld = tf.square(mu).sg_sum(axis=1) / (28 * 28)
tf.sg_summary_loss(loss_kld, name='kld')
loss = loss_recon + loss_kld * 0.5
# do training
tf.sg_train(loss=loss, log_interval=10, ep_size=data.train.num_batch, max_ep=30, early_stop=False,
save_dir='asset/train/vae')
| buriburisuri/sugartensor | sugartensor/example/mnist_vae.py | Python | mit | 1,452 | ["Gaussian"] | ba37f6de33b52faa962d6c161edfcb290484597d0873f71a96ac2c6cc06c5e91 |
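The encoder above predicts only a mean and adds unit-variance noise; the usual VAE reparameterization also learns a log-variance. A plain NumPy sketch of that trick follows (illustrative only, not sugartensor API).

# Reparameterization trick with a learned log-variance, plain NumPy sketch
# (illustrative only; the sugartensor model above uses a fixed unit variance).
import numpy as np

def reparameterize(mu, log_var, rng):
    # z = mu + sigma * eps keeps sampling differentiable w.r.t. mu and log_var
    eps = rng.standard_normal(mu.shape)
    return mu + np.exp(0.5 * log_var) * eps

rng = np.random.default_rng(0)
mu = np.zeros((32, 50))        # batch_size x num_dim, matching the model above
log_var = np.zeros((32, 50))   # log-variance of 0 means unit variance
z = reparameterize(mu, log_var, rng)

# KL divergence of N(mu, sigma^2) from N(0, 1), per example
kld = 0.5 * np.sum(np.exp(log_var) + mu ** 2 - 1.0 - log_var, axis=1)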
# Copyright 2016 James Hensman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from run_pines import build_model, getLocations
from sklearn.neighbors import KernelDensity
# import matplotlib2tikz
X = getLocations()
Ms = [14, 16, 18, 20, 22, 24, 26, 28, 30]
def plot_model(m, sample_df, ax, gridResolution=64):
    intensities = []
    for _, s in sample_df.iterrows():
        m.set_parameter_dict(s)
        mu, _ = m.predict_y(m.X.value)
        intensities.append(mu)
    intensity = np.mean(intensities, 0)
    ax.imshow(np.flipud(intensity.reshape(gridResolution, gridResolution).T),
              interpolation='nearest', extent=[0, 1, 0, 1], cmap=plt.cm.viridis,
              vmin=0.005, vmax=0.18)
f, axes = plt.subplots(2, 5, sharex=True, sharey=True, figsize=(12, 5))
for ax, M in zip(axes.flat, Ms):
    continue
    m = build_model(M)
    df = pd.read_pickle('samples_df_M{}.pickle'.format(M))
    # df = df.ix[::100] # thin for speed
    plot_model(m, df, ax)
    ax.set_title(str(M))
# axes.flatten()[-1].plot(X[:, 0], X[:, 1], 'k.')
# matplotlib2tikz.save('pines_intensity.tikz')
# plot the convergence of the parameters:
f, axes = plt.subplots(1, 2, sharex=False, sharey=False, figsize=(12, 5))
keys = ['model.kerns.item0.lengthscales', 'model.kerns.item1.lengthscales']
titles = ['lengthscale (horz.)', 'lengthscale (vert)']
mins = [0, 0]
maxs = [0.4, 0.4]
for key, title, ax, xmin, xmax in zip(keys, titles, axes.flatten(), mins, maxs):
    for M in Ms:
        m = build_model(M)
        df = pd.read_pickle('samples_df_M{}.pickle'.format(M))
        ls = np.vstack(df[key])
        kde = KernelDensity(kernel='gaussian', bandwidth=0.05).fit(ls)
        X_plot = np.linspace(xmin, xmax, 100)[:, None]
        ax.plot(X_plot, np.exp(kde.score_samples(X_plot)), label=str(M))
    ax.legend()
# matplotlib2tikz.save('pines_lengthscale_convergence.tikz')
| jameshensman/VFF | experiments/mcmc_pines/plot_pines.py | Python | apache-2.0 | 2,449 | ["Gaussian"] | 3c09735a3438cb7246beeebc75c3d238cbea7e14fddcc17f9b2a93acb67fa22e |
|
import os
import parabem
import numpy
class CaseToVTK():
def __init__(self, case, _dir, suffix=None):
"""automatic export of a case"""
self.case = case
self.make_sure_path_exists(_dir)
self._dir = _dir
self.suffix = ""
if suffix:
self.suffix = "_" + suffix
def write_panels(self, cp=True, vel=True, pot=True, data_type="cell"):
"""type=cell/point"""
with open(self._dir + "/panels" + self.suffix + ".vtk", "w") as _file:
writer = VtkWriter()
writer.unstructed_grid(_file, "panels")
writer.points(_file, self.vertices)
writer.flat_cells(_file, self.panels)
if data_type != "point":
if cp:
writer.data(_file, self.pan_cp, "cp")
if pot:
writer.data(_file, self.pan_pot, "potential")
if vel:
writer.data(_file, self.pan_vel, "velocity", _type="VECTORS")
else:
if cp:
writer.data(_file, self.vert_cp, "cp", data_type="POINT_DATA")
if pot:
writer.data(_file, self.vert_pot, "potential", data_type="POINT_DATA")
if vel:
writer.data(_file, self.vert_vel, "velocity", _type="VECTORS", data_type="POINT_DATA")
def write_wake_panels(self, pot=True, normal=True):
with open(self._dir + "/wakepanels" + self.suffix + ".vtk", "w") as _file:
writer = VtkWriter()
writer.unstructed_grid(_file, "wakepanels")
writer.points(_file, self.wake_vertices)
writer.flat_cells(_file, self.wake_panels)
if normal:
writer.data(_file, [i.n for i in self.case.wake_panels], "normals", _type="VECTORS")
if pot:
writer.data(_file, self.wake_pot, "potential")
def write_field(self, x_seq, y_seq, z_seq, vel=True, pot=True, perturbation=False):
dim = [x_seq[-1], y_seq[-1], z_seq[-1]]
x_grid = numpy.linspace(*x_seq)
y_grid = numpy.linspace(*y_seq)
z_grid = numpy.linspace(*z_seq)
vertices = [parabem.PanelVector3(x, y, z) for z in z_grid for y in y_grid for x in x_grid]
for vert in vertices:
if pot:
self.case.off_body_potential(vert)
if vel or perturbation:
self.case.off_body_velocity(vert)
with open(self._dir + "/field.vtk", "w") as _file:
writer = VtkWriter()
writer.structed_grid(_file, "field_data", dim)
writer.points(_file, vertices)
if pot:
_potential = [vert.potential for vert in vertices]
writer.data(_file, _potential, "potential", data_type="POINT_DATA")
if vel:
_velocity = [vert.velocity for vert in vertices]
writer.data(_file, _velocity, "velocity", _type="VECTORS", data_type="POINT_DATA")
if perturbation:
_velocity = [vert.velocity.x - self.case.v_inf.x for vert in vertices]
writer.data(_file, _velocity, "velocity", data_type="POINT_DATA")
def write_stream_lines(self, start_points=[[0, 0, 0]], interval=0.01, numpoints=100):
vertices, line_numbers = self._stream_lines(start_points, interval, numpoints)
with open(self._dir + "/stream_lines.vtk", "w") as _file:
writer = VtkWriter()
writer.unstructed_grid(_file, "flow_path")
writer.points(_file, vertices)
writer.lines(_file, line_numbers)
def write_body_stream(self, start_panels, num_panels=20):
vertices, line_numbers = self._body_stream_lines(start_panels, num_panels)
with open(self._dir + "/body_stream_lines.vtk", "w") as _file:
writer = VtkWriter()
writer.unstructed_grid(_file, "flow_path")
writer.points(_file, vertices)
writer.lines(_file, line_numbers)
@staticmethod
def make_sure_path_exists(path):
if not os.path.exists(path):
os.makedirs(path)
return path
@property
def vertices(self):
"""set the vertex numbers"""
return self.case.vertices
@property
def wake_vertices(self):
return [vert for pan in self.case.wake_panels for vert in pan.points]
@property
def panels(self):
return [[vert.nr for vert in pan.points] for pan in self.case.panels]
@property
def wake_panels(self):
pans_nr = []
i = 0
for pan in self.case.wake_panels:
verts_nr = []
for vert in pan.points:
verts_nr.append(i)
i += 1
pans_nr.append(verts_nr)
return(pans_nr)
@property
def pan_vel(self):
return [pan.velocity for pan in self.case.panels]
@property
def vert_vel(self):
return [vert.velocity for vert in self.case.vertices]
@property
def pan_cp(self):
return [pan.cp for pan in self.case.panels]
@property
def vert_cp(self):
return [vert.cp for vert in self.case.vertices]
@property
def wake_pot(self):
return [pan.potential for pan in self.case.wake_panels]
@property
def pan_pot(self):
return [pan.potential for pan in self.case.panels]
@property
def vert_pot(self):
return [vert.potential for vert in self.case.vertices]
def _stream_lines(self, start_points=[[0, 0, 0]], interval=0.01, numpoints=100):
if not isinstance(start_points[0], parabem.Vector3):
start_points = list(map(parabem.Vector3, start_points))
print("COMPUTE STREAMLINES")
flow_paths = [self.case.flow_path(i, interval, numpoints) for i in start_points]
vertices = []
fpw = []
i = 0
for fp in flow_paths:
fpwi = []
for vertex in fp:
vertices.append(vertex)
fpwi.append(i)
i += 1
fpw.append(fpwi)
return vertices, fpw
def _body_stream_lines(self, start_panels, numpanels=100):
flow_paths = [self.case.body_flow_path(i, numpanels) for i in start_panels]
vertices = []
fpw = []
i = 0
for fp in flow_paths:
fpwi = []
for vertex in fp:
vertices.append(vertex)
fpwi.append(i)
i += 1
fpw.append(fpwi)
return vertices, fpw
class VtkWriter():
def __init__(self):
self._data_set = False
def structed_grid(self, _file, name, dim):
_file.write("# vtk DataFile Version 3.0\n")
_file.write(name + " \n")
_file.write("ASCII\n")
_file.write("DATASET STRUCTURED_GRID\n")
_file.write("DIMENSIONS ")
for dim_i in dim:
_file.write(str(dim_i) + " ")
def unstructed_grid(self, _file, name):
_file.write("# vtk DataFile Version 3.0\n")
_file.write(name + " \n")
_file.write("ASCII\n")
_file.write("DATASET UNSTRUCTURED_GRID\n")
def points(self, _file, vertices):
_file.write("POINTS " + str(len(vertices)) + " float\n")
for point in vertices:
v = parabem.Vector3(point)
_file.write(str(v[0]) + " " +
str(v[1]) + " " +
str(v[2]) + "\n")
def flat_cells(self, _file, cells):
_file.write("\nCELLS " + str(len(cells)) + " ")
n = 0
for i in cells:
n += len(i) + 1
_file.write(str(n) + "\n")
for i in cells:
_file.write(str(len(i)))
for j in i:
_file.write(" " + str(j))
_file.write("\n")
_file.write("\nCELL_TYPES " + str(len(cells)) + "\n")
for j, i in enumerate(cells):
if j % 5 == 0:
_file.write("\n")
if len(i) == 3:
_file.write("5 ")
elif len(i) == 4:
_file.write("9 ")
else:
_file.write("7 ")
_file.write("\n")
def data(self, _file, data, name="data", _type="SCALARS", data_type="CELL_DATA"):
if not self._data_set:
_file.write(data_type +" " + str(len(data)) + "\n")
self._data_set = True
if _type =="SCALARS":
_file.write("SCALARS " + name + " float\n")
_file.write("LOOKUP_TABLE default")
for j, value in enumerate(data):
if j % 5 == 0:
_file.write("\n")
_file.write(str(value) + " ")
_file.write("\n\n")
if _type =="VECTORS":
_file.write("VECTORS " + name + " float\n")
_file.write("\n")
for point in data:
v = parabem.Vector3(point)
_file.write(
str(v[0]) + " " +
str(v[1]) + " " +
str(v[2]) + "\n")
_file.write("\n\n")
def lines(self, _file, lines):
"""lines = [[0,1,2,3,4,5], [6,7,8,9,10]...]"""
n_lines = sum(map(len, lines)) - len(lines)
_file.write("\nCELLS " + str(n_lines) + " " + str(n_lines * 3) + "\n")
for line in lines:
for j, k in enumerate(line[:-1]):
_file.write("2 ")
_file.write(str(k) + " " + str(line[j + 1]))
_file.write("\n")
_file.write("\nCELL_TYPES " + str(n_lines) + "\n")
for k, j in enumerate(range(n_lines)):
if j % 5 == 0:
_file.write("\n")
_file.write("3 ")
_file.write("\n")
| looooo/panel-method | parabem/vtk_export/__init__.py | Python | gpl-3.0 | 9,746 | ["VTK"] | 2275c0a3c90e37ecb244f60f728d397452e2b93b37e9a01c18c95e23c716ea7b |
|
from math import pi
import numpy as np
from ase.atoms import Atoms
def make_test_dft_calculation():
a = b = 2.0
c = 6.0
atoms = Atoms(positions=[(0, 0, c / 2)],
symbols='H',
pbc=(1, 1, 0),
cell=(a, b, c),
calculator=TestCalculator())
return atoms
class TestCalculator:
def __init__(self, nk=8):
assert nk % 2 == 0
bzk = []
weights = []
ibzk = []
w = 1.0 / nk**2
for i in range(-nk + 1, nk, 2):
for j in range(-nk + 1, nk, 2):
k = (0.5 * i / nk, 0.5 * j / nk, 0)
bzk.append(k)
if i >= j > 0:
ibzk.append(k)
if i == j:
weights.append(4 * w)
else:
weights.append(8 * w)
assert abs(sum(weights) - 1.0) < 1e-12
self.bzk = np.array(bzk)
self.ibzk = np.array(ibzk)
self.weights = np.array(weights)
# Calculate eigenvalues and wave functions:
self.init()
def init(self):
nibzk = len(self.weights)
nbands = 1
V = -1.0
self.eps = 2 * V * (np.cos(2 * pi * self.ibzk[:, 0]) +
np.cos(2 * pi * self.ibzk[:, 1]))
self.eps.shape = (nibzk, nbands)
self.psi = np.zeros((nibzk, 20, 20, 60), complex)
phi = np.empty((2, 2, 20, 20, 60))
z = np.linspace(-1.5, 1.5, 60, endpoint=False)
for i in range(2):
x = np.linspace(0, 1, 20, endpoint=False) - i
for j in range(2):
y = np.linspace(0, 1, 20, endpoint=False) - j
r = (((x[:, None]**2 +
y**2)[:, :, None] +
z**2)**0.5).clip(0, 1)
phi = 1.0 - r**2 * (3.0 - 2.0 * r)
phase = np.exp(pi * 2j * np.dot(self.ibzk, (i, j, 0)))
self.psi += phase[:, None, None, None] * phi
def get_pseudo_wave_function(self, band=0, kpt=0, spin=0):
assert spin == 0 and band == 0
return self.psi[kpt]
def get_eigenvalues(self, kpt=0, spin=0):
assert spin == 0
return self.eps[kpt]
def get_number_of_bands(self):
return 1
def get_k_point_weights(self):
return self.weights
def get_number_of_spins(self):
return 1
def get_fermi_level(self):
return 0.0
class TestPotential:
def get_forces(self, atoms):
E = 0.0
R = atoms.positions
F = np.zeros_like(R)
for a, r in enumerate(R):
D = R - r
d = (D**2).sum(1)**0.5
x = d - 1.0
E += np.vdot(x, x)
d[a] = 1
F -= (x / d)[:, None] * D
self.energy = 0.25 * E
return F
def get_potential_energy(self, atoms):
self.get_forces(atoms)
return self.energy
def get_stress(self, atoms):
raise NotImplementedError
def numeric_force(atoms, a, i, d=0.001):
"""Evaluate force along i'th axis on a'th atom using finite difference.
This will trigger two calls to get_potential_energy(), with atom a moved
plus/minus d in the i'th axial direction, respectively.
"""
p0 = atoms.positions[a, i]
atoms.positions[a, i] += d
eplus = atoms.get_potential_energy()
atoms.positions[a, i] -= 2 * d
eminus = atoms.get_potential_energy()
atoms.positions[a, i] = p0
return (eminus - eplus) / (2 * d)
def numeric_forces(atoms, indices=None, axes=(0, 1, 2), d=0.001):
"""Evaluate finite-difference forces on several atoms.
Returns an array of forces for each specified atomic index and
each specified axis, calculated using finite difference on each
atom and direction separately. Array has same shape as if
returned from atoms.get_forces(); uncalculated elements are zero.
Calculates all forces by default."""
if indices is None:
indices = range(len(atoms))
F_ai = np.zeros_like(atoms.positions)
for a in indices:
for i in axes:
F_ai[a, i] = numeric_force(atoms, a, i, d)
return F_ai
| slabanja/ase | ase/calculators/test.py | Python | gpl-2.0 | 4,190 | ["ASE"] | 0784632f64b657d175ff1cdc6e2135613abdf5aa5529cb4f11a8eb0e043e48b6 |
|
"""Collection of :class:`~chainer.Function` implementations."""
from chainer.functions.activation import clipped_relu
from chainer.functions.activation import elu
from chainer.functions.activation import leaky_relu
from chainer.functions.activation import log_softmax
from chainer.functions.activation import lstm
from chainer.functions.activation import maxout
from chainer.functions.activation import prelu
from chainer.functions.activation import relu
from chainer.functions.activation import sigmoid
from chainer.functions.activation import slstm
from chainer.functions.activation import softmax
from chainer.functions.activation import softplus
from chainer.functions.activation import tanh
from chainer.functions.array import broadcast
from chainer.functions.array import concat
from chainer.functions.array import copy
from chainer.functions.array import expand_dims
from chainer.functions.array import reshape
from chainer.functions.array import select_item
from chainer.functions.array import split_axis
from chainer.functions.array import swapaxes
from chainer.functions.array import transpose
from chainer.functions.array import where
from chainer.functions.connection import bilinear
from chainer.functions.connection import convolution_2d
from chainer.functions.connection import deconvolution_2d
from chainer.functions.connection import embed_id
from chainer.functions.connection import linear
from chainer.functions.evaluation import accuracy
from chainer.functions.evaluation import binary_accuracy
from chainer.functions.loss import contrastive
from chainer.functions.loss import cross_covariance
from chainer.functions.loss import ctc
from chainer.functions.loss import hinge
from chainer.functions.loss import mean_squared_error
from chainer.functions.loss import negative_sampling
from chainer.functions.loss import sigmoid_cross_entropy
from chainer.functions.loss import softmax_cross_entropy
from chainer.functions.loss import vae # NOQA
from chainer.functions.math import basic_math # NOQA
from chainer.functions.math import batch_l2_norm_squared
from chainer.functions.math import det
from chainer.functions.math import exponential
from chainer.functions.math import identity
from chainer.functions.math import inv
from chainer.functions.math import matmul
from chainer.functions.math import minmax
from chainer.functions.math import sum
from chainer.functions.math import trigonometric
from chainer.functions.noise import dropout
from chainer.functions.noise import gaussian
from chainer.functions.normalization import batch_normalization
from chainer.functions.normalization import local_response_normalization
from chainer.functions.pooling import average_pooling_2d
from chainer.functions.pooling import max_pooling_2d
from chainer.functions.pooling import spatial_pyramid_pooling_2d
from chainer.functions.pooling import unpooling_2d
from chainer.links.activation import prelu as links_prelu
from chainer.links.connection import bilinear as links_bilinear
from chainer.links.connection import convolution_2d as links_convolution_2d
from chainer.links.connection import embed_id as links_embed_id
from chainer.links.connection import inception
from chainer.links.connection import inceptionbn
from chainer.links.connection import linear as links_linear
from chainer.links.connection import parameter
from chainer.links.loss import hierarchical_softmax
from chainer.links.loss import negative_sampling as links_negative_sampling
from chainer.links.normalization import batch_normalization \
as links_batch_normalization
ClippedReLU = clipped_relu.ClippedReLU
clipped_relu = clipped_relu.clipped_relu
ConnectionistTemporalClassification = ctc.ConnectionistTemporalClassification
connectionist_temporal_classification \
= ctc.connectionist_temporal_classification
ELU = elu.ELU
elu = elu.elu
LeakyReLU = leaky_relu.LeakyReLU
leaky_relu = leaky_relu.leaky_relu
LogSoftmax = log_softmax.LogSoftmax
log_softmax = log_softmax.log_softmax
LSTM = lstm.LSTM
lstm = lstm.lstm
maxout = maxout.maxout
prelu = prelu.prelu
ReLU = relu.ReLU
relu = relu.relu
Sigmoid = sigmoid.Sigmoid
sigmoid = sigmoid.sigmoid
SLSTM = slstm.SLSTM
slstm = slstm.slstm
Softmax = softmax.Softmax
softmax = softmax.softmax
Softplus = softplus.Softplus
softplus = softplus.softplus
Tanh = tanh.Tanh
tanh = tanh.tanh
Broadcast = broadcast.Broadcast
BroadcastTo = broadcast.BroadcastTo
broadcast_to = broadcast.broadcast_to
broadcast = broadcast.broadcast
Concat = concat.Concat
concat = concat.concat
Copy = copy.Copy
copy = copy.copy
ExpandDims = expand_dims.ExpandDims
expand_dims = expand_dims.expand_dims
Reshape = reshape.Reshape
reshape = reshape.reshape
SplitAxis = split_axis.SplitAxis
split_axis = split_axis.split_axis
SelectItem = select_item.SelectItem
select_item = select_item.select_item
Swapaxes = swapaxes.Swapaxes
swapaxes = swapaxes.swapaxes
Transpose = transpose.Transpose
transpose = transpose.transpose
Where = where.Where
where = where.where
bilinear = bilinear.bilinear
convolution_2d = convolution_2d.convolution_2d
deconvolution_2d = deconvolution_2d.deconvolution_2d
embed_id = embed_id.embed_id
linear = linear.linear
Accuracy = accuracy.Accuracy
accuracy = accuracy.accuracy
BinaryAccuracy = binary_accuracy.BinaryAccuracy
binary_accuracy = binary_accuracy.binary_accuracy
bernoulli_nll = vae.bernoulli_nll
BinaryHierarchicalSoftmax = hierarchical_softmax.BinaryHierarchicalSoftmax
Contrastive = contrastive.Contrastive
contrastive = contrastive.contrastive
CrossCovariance = cross_covariance.CrossCovariance
cross_covariance = cross_covariance.cross_covariance
gaussian_kl_divergence = vae.gaussian_kl_divergence
gaussian_nll = vae.gaussian_nll
Hinge = hinge.Hinge
hinge = hinge.hinge
MeanSquaredError = mean_squared_error.MeanSquaredError
mean_squared_error = mean_squared_error.mean_squared_error
negative_sampling = negative_sampling.negative_sampling
SigmoidCrossEntropy = sigmoid_cross_entropy.SigmoidCrossEntropy
sigmoid_cross_entropy = sigmoid_cross_entropy.sigmoid_cross_entropy
SoftmaxCrossEntropy = softmax_cross_entropy.SoftmaxCrossEntropy
softmax_cross_entropy = softmax_cross_entropy.softmax_cross_entropy
BatchDet = det.BatchDet
batch_det = det.batch_det
BatchInv = inv.BatchInv
batch_inv = inv.batch_inv
BatchL2NormSquared = batch_l2_norm_squared.BatchL2NormSquared
batch_l2_norm_squared = batch_l2_norm_squared.batch_l2_norm_squared
BatchMatMul = matmul.BatchMatMul
batch_matmul = matmul.batch_matmul
Cos = trigonometric.Cos
cos = trigonometric.cos
det = det.det
Exp = exponential.Exp
exp = exponential.exp
Identity = identity.Identity
identity = identity.identity
Inv = inv.Inv
inv = inv.inv
Log = exponential.Log
log = exponential.log
MatMul = matmul.MatMul
matmul = matmul.matmul
Max = minmax.Max
max = minmax.max
Min = minmax.Min
min = minmax.min
Sin = trigonometric.Sin
sin = trigonometric.sin
Sum = sum.Sum
sum = sum.sum
Dropout = dropout.Dropout
dropout = dropout.dropout
Gaussian = gaussian.Gaussian
gaussian = gaussian.gaussian
fixed_batch_normalization = batch_normalization.fixed_batch_normalization
batch_normalization = batch_normalization.batch_normalization
LocalResponseNormalization = \
local_response_normalization.LocalResponseNormalization
local_response_normalization = \
local_response_normalization.local_response_normalization
AveragePooling2D = average_pooling_2d.AveragePooling2D
average_pooling_2d = average_pooling_2d.average_pooling_2d
MaxPooling2D = max_pooling_2d.MaxPooling2D
max_pooling_2d = max_pooling_2d.max_pooling_2d
SpatialPyramidPooling2D = spatial_pyramid_pooling_2d.SpatialPyramidPooling2D
spatial_pyramid_pooling_2d = \
spatial_pyramid_pooling_2d.spatial_pyramid_pooling_2d
Unpooling2D = unpooling_2d.Unpooling2D
unpooling_2d = unpooling_2d.unpooling_2d
# Import for backward compatibility
PReLU = links_prelu.PReLU
Bilinear = links_bilinear.Bilinear
Convolution2D = links_convolution_2d.Convolution2D
EmbedID = links_embed_id.EmbedID
Inception = inception.Inception
InceptionBN = inceptionbn.InceptionBN
Linear = links_linear.Linear
Parameter = parameter.Parameter
BinaryHierarchicalSoftmax = hierarchical_softmax.BinaryHierarchicalSoftmax
NegativeSampling = links_negative_sampling.NegativeSampling
BatchNormalization = links_batch_normalization.BatchNormalization
| cemoody/chainer | chainer/functions/__init__.py | Python | mit | 8,284 | ["Gaussian"] | d2fca82b8a45673d8d23335be4fe6e2ebb3b0212edb968e1adbc314914ebfdd0 |
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This is a demonstration of a small domain-specific language for building
strings up out of other strings.
You can define variables that are equal to literal strings or built up out of
other variables.
Evaluating a program will return a dictionary of all the variables expanded
into their full strings.
Variables are evaluated in order, so redefining one will overwrite the previous
definition.
Comments start with a hash character ('#').
For example::
{
# test program
a = "xyz"
b = "abc"
c = "def"
c = "333" # overwrites def
d = c + a + b
}
This would return a python dictionary::
{"a": "xyz", "b": "abc", "c": "333", "d": "333xyzabc"}
"""
import sys
from parsimonious import Grammar, NodeVisitor
string_expression_grammar = Grammar(r"""# JSON grammar
program = program_body / ignored_line*
program_body = ignored_line* begin line* end ignored_line*
ignored_line = comment_line / blank_line
comment_body = whitespace comment_symbol comment_text
comment_line = comment_body newline
blank_line = whitespace newline?
line = expression / ignored_line
begin = whitespace begin_symbol whitespace
end = whitespace end_symbol whitespace
plus = whitespace plus_symbol whitespace
expression = whitespace variable whitespace assignment_symbol whitespace
expression_value whitespace newline?
expression_value = literal_string / identifier_group
identifier_group = identifier_value (plus identifier_value)*
literal_string = ~"'.*'"
comment_text = ~".*"
variable = identifier
identifier_value = identifier
identifier = ~"[a-zA-Z0-9_]+"
single_whitespace = ~"[ \t]"
whitespace = single_whitespace*
begin_symbol = "{"
end_symbol = "}"
comment_symbol = "#"
assignment_symbol = "="
plus_symbol = "+"
newline = "\n"
""")
class IdentifierGroupVisitor(NodeVisitor):
def __init__(self, identifiers=None):
super().__init__()
self.identifiers = identifiers or {}
def visit_identifier(self, node, visited_children):
return self.identifiers[node.text]
def visit(self, node):
values = self.collect_matching_children(node, 'identifier')
return ''.join(values)
def collect_matching_children(self, node, expr_name):
if node.expr_name == expr_name:
return super().visit(node)
else:
nodes = []
if node.children:
for child_node in node.children:
value = self.collect_matching_children(child_node, expr_name)
if type(value) != type([]):
nodes.append(value)
elif len(value) > 0:
nodes += value
return nodes
class StringExpressionVisitor(NodeVisitor):
def __init__(self, identifiers = None):
super().__init__()
self.identifiers = identifiers or {}
def visit_literal_string(self, node, visited_children):
return node.text.strip("'")
def visit_identifier(self, node, visited_children):
return node.text
def visit_variable(self, node, visited_children):
return node.text
def visit_expression_value(self, node, visited_children):
return ''.join(visited_children)
def visit_expression(self, node, visited_children):
result = [self.visit(child_node) for child_node in node.children]
result = [item for item in result if item]
variable_name = result[0]
expression_value = result[1]
self.identifiers[variable_name] = expression_value
def visit_identifier_group(self, node, visited_children):
identifier_group_node_visitor = IdentifierGroupVisitor(identifiers=self.identifiers)
return identifier_group_node_visitor.visit(node)
def generic_visit(self, node, visited_children):
pass
def evaluate(string_expressions):
root_node = string_expression_grammar.parse(string_expressions)
node_visitor = StringExpressionVisitor()
node_visitor.visit(root_node)
return node_visitor.identifiers
def main():
filename = sys.argv[1]
with open(filename) as file:
contents = file.read()
print(contents)
print( evaluate(contents) )
if __name__ == '__main__':
main()
| codeyash/plugins | PyPlugins/PhpParser/py/string_expression_language.py | Python | apache-2.0 | 4,427 | ["VisIt"] | 33a22bfd4de3b17bed14536e5a14194ed95457e8357abd5ae206d62e9c47668f |
|
"""Metrics to assess performance on classification task given class prediction.
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better.
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Jatin Shah <jatindshah@gmail.com>
# Saurabh Jha <saurabh.jhaa@gmail.com>
# Bernardo Stein <bernardovstein@gmail.com>
# Shangwu Yao <shangwuyao@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from ..preprocessing import LabelBinarizer
from ..preprocessing import LabelEncoder
from ..utils import assert_all_finite
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.validation import _deprecate_positional_args
from ..utils.sparsefuncs import count_nonzero
from ..exceptions import UndefinedMetricWarning
from ._base import _check_pos_label_consistency
def _check_zero_division(zero_division):
if isinstance(zero_division, str) and zero_division == "warn":
return
elif isinstance(zero_division, (int, float)) and zero_division in [0, 1]:
return
raise ValueError('Got zero_division={0}.'
' Must be one of ["warn", 0, 1]'.format(zero_division))
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task.
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``.
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = {type_true, type_pred}
if y_type == {"binary", "multiclass"}:
y_type = {"multiclass"}
if len(y_type) > 1:
raise ValueError("Classification metrics can't handle a mix of {0} "
"and {1} targets".format(type_true, type_pred))
# We can't have more than one value on y_type => The set is no more needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type == "binary":
try:
unique_values = np.union1d(y_true, y_pred)
except TypeError as e:
# We expect y_true and y_pred to be of the same data type.
# If `y_true` was provided to the classifier as strings,
# `y_pred` given by the classifier will also be encoded with
# strings. So we raise a meaningful error
raise TypeError(
f"Labels in y_true and y_pred should be of the same type. "
f"Got y_true={np.unique(y_true)} and "
f"y_pred={np.unique(y_pred)}. Make sure that the "
f"predictions provided by the classifier coincides with "
f"the true labels."
) from e
if len(unique_values) > 2:
y_type = "multiclass"
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
@_deprecate_positional_args
def accuracy_score(y_true, y_pred, *, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, default=True
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the fraction of correctly
classified samples (float), else returns the number of correctly
classified samples (int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See Also
--------
jaccard_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_score`` function.
Examples
--------
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> import numpy as np
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
check_consistent_length(y_true, y_pred, sample_weight)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
@_deprecate_positional_args
def confusion_matrix(y_true, y_pred, *, labels=None, sample_weight=None,
normalize=None):
"""Compute confusion matrix to evaluate the accuracy of a classification.
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` and
predicted to be in group :math:`j`.
Thus in binary classification, the count of true negatives is
:math:`C_{0,0}`, false negatives is :math:`C_{1,0}`, true positives is
:math:`C_{1,1}` and false positives is :math:`C_{0,1}`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,)
Estimated targets as returned by a classifier.
labels : array-like of shape (n_classes), default=None
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If ``None`` is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
.. versionadded:: 0.18
normalize : {'true', 'pred', 'all'}, default=None
Normalizes confusion matrix over the true (rows), predicted (columns)
conditions or all the population. If None, confusion matrix will not be
normalized.
Returns
-------
C : ndarray of shape (n_classes, n_classes)
Confusion matrix whose i-th row and j-th
column entry indicates the number of
samples with true label being i-th class
and predicted label being j-th class.
See Also
--------
ConfusionMatrixDisplay.from_estimator : Plot the confusion matrix
given an estimator, the data, and the label.
ConfusionMatrixDisplay.from_predictions : Plot the confusion matrix
given the true and predicted labels.
ConfusionMatrixDisplay : Confusion Matrix visualization.
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<https://en.wikipedia.org/wiki/Confusion_matrix>`_
(Wikipedia and other references may use a different
convention for axes).
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
In the binary case, we can extract true positives, etc as follows:
>>> tn, fp, fn, tp = confusion_matrix([0, 1, 0, 1], [1, 1, 1, 0]).ravel()
>>> (tn, fp, fn, tp)
(0, 2, 1, 1)
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
n_labels = labels.size
if n_labels == 0:
raise ValueError("'labels' should contains at least one label.")
elif y_true.size == 0:
return np.zeros((n_labels, n_labels), dtype=int)
elif np.all([l not in y_true for l in labels]):
raise ValueError("At least one label specified must be in y_true")
if sample_weight is None:
sample_weight = np.ones(y_true.shape[0], dtype=np.int64)
else:
sample_weight = np.asarray(sample_weight)
check_consistent_length(y_true, y_pred, sample_weight)
if normalize not in ['true', 'pred', 'all', None]:
raise ValueError("normalize must be one of {'true', 'pred', "
"'all', None}")
n_labels = labels.size
label_to_ind = {y: x for x, y in enumerate(labels)}
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
# also eliminate weights of eliminated items
sample_weight = sample_weight[ind]
# Choose the accumulator dtype to always have high precision
if sample_weight.dtype.kind in {'i', 'u', 'b'}:
dtype = np.int64
else:
dtype = np.float64
cm = coo_matrix((sample_weight, (y_true, y_pred)),
shape=(n_labels, n_labels), dtype=dtype,
).toarray()
with np.errstate(all='ignore'):
if normalize == 'true':
cm = cm / cm.sum(axis=1, keepdims=True)
elif normalize == 'pred':
cm = cm / cm.sum(axis=0, keepdims=True)
elif normalize == 'all':
cm = cm / cm.sum()
cm = np.nan_to_num(cm)
return cm
@_deprecate_positional_args
def multilabel_confusion_matrix(y_true, y_pred, *, sample_weight=None,
labels=None, samplewise=False):
"""Compute a confusion matrix for each class or sample.
.. versionadded:: 0.21
Compute class-wise (default) or sample-wise (samplewise=True) multilabel
confusion matrix to evaluate the accuracy of a classification, and output
confusion matrices for each class or sample.
In multilabel confusion matrix :math:`MCM`, the count of true negatives
is :math:`MCM_{:,0,0}`, false negatives is :math:`MCM_{:,1,0}`,
true positives is :math:`MCM_{:,1,1}` and false positives is
:math:`MCM_{:,0,1}`.
Multiclass data will be treated as if binarized under a one-vs-rest
transformation. Returned confusion matrices will be in the order of
sorted unique labels in the union of (y_true, y_pred).
Read more in the :ref:`User Guide <multilabel_confusion_matrix>`.
Parameters
----------
y_true : {array-like, sparse matrix} of shape (n_samples, n_outputs) or \
(n_samples,)
Ground truth (correct) target values.
y_pred : {array-like, sparse matrix} of shape (n_samples, n_outputs) or \
(n_samples,)
Estimated targets as returned by a classifier.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
labels : array-like of shape (n_classes,), default=None
A list of classes or column indices to select some (or to force
inclusion of classes absent from the data).
samplewise : bool, default=False
In the multilabel case, this calculates a confusion matrix per sample.
Returns
-------
multi_confusion : ndarray of shape (n_outputs, 2, 2)
A 2x2 confusion matrix corresponding to each output in the input.
When calculating class-wise multi_confusion (default), then
n_outputs = n_labels; when calculating sample-wise multi_confusion
(samplewise=True), n_outputs = n_samples. If ``labels`` is defined,
the results will be returned in the order specified in ``labels``,
otherwise the results will be returned in sorted order by default.
See Also
--------
confusion_matrix
Notes
-----
The multilabel_confusion_matrix calculates class-wise or sample-wise
multilabel confusion matrices, and in multiclass tasks, labels are
binarized under a one-vs-rest way; while confusion_matrix calculates
one confusion matrix for confusion between every two classes.
Examples
--------
Multilabel-indicator case:
>>> import numpy as np
>>> from sklearn.metrics import multilabel_confusion_matrix
>>> y_true = np.array([[1, 0, 1],
... [0, 1, 0]])
>>> y_pred = np.array([[1, 0, 0],
... [0, 1, 1]])
>>> multilabel_confusion_matrix(y_true, y_pred)
array([[[1, 0],
[0, 1]],
<BLANKLINE>
[[1, 0],
[0, 1]],
<BLANKLINE>
[[0, 1],
[1, 0]]])
Multiclass case:
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> multilabel_confusion_matrix(y_true, y_pred,
... labels=["ant", "bird", "cat"])
array([[[3, 1],
[0, 2]],
<BLANKLINE>
[[5, 0],
[1, 0]],
<BLANKLINE>
[[2, 1],
[1, 2]]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
check_consistent_length(y_true, y_pred, sample_weight)
if y_type not in ("binary", "multiclass", "multilabel-indicator"):
raise ValueError("%s is not supported" % y_type)
present_labels = unique_labels(y_true, y_pred)
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
if y_true.ndim == 1:
if samplewise:
raise ValueError("Samplewise metrics are not available outside of "
"multilabel classification.")
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = np.bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = np.bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = np.bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
else:
sum_axis = 1 if samplewise else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.array_equal(labels, present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels) for '
'multilabel targets. '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels) for '
'multilabel targets. '
'Got %d < 0' % np.min(labels))
if n_labels is not None:
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
fp = pred_sum - tp_sum
fn = true_sum - tp_sum
tp = tp_sum
if sample_weight is not None and samplewise:
sample_weight = np.array(sample_weight)
tp = np.array(tp)
fp = np.array(fp)
fn = np.array(fn)
tn = sample_weight * y_true.shape[1] - tp - fp - fn
elif sample_weight is not None:
tn = sum(sample_weight) - tp - fp - fn
elif samplewise:
tn = y_true.shape[1] - tp - fp - fn
else:
tn = y_true.shape[0] - tp - fp - fn
return np.array([tn, fp, fn, tp]).T.reshape(-1, 2, 2)
@_deprecate_positional_args
def cohen_kappa_score(y1, y2, *, labels=None, weights=None,
sample_weight=None):
r"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1]_, a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2]_.
Read more in the :ref:`User Guide <cohen_kappa>`.
Parameters
----------
y1 : array of shape (n_samples,)
Labels assigned by the first annotator.
y2 : array of shape (n_samples,)
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array-like of shape (n_classes,), default=None
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
weights : {'linear', 'quadratic'}, default=None
Weighting type to calculate the score. None means no weighted;
"linear" means linear weighted; "quadratic" means quadratic weighted.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] `R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistics 34(4):555-596
<https://www.mitpressjournals.org/doi/pdf/10.1162/coli.07-034-R2>`_.
.. [3] `Wikipedia entry for the Cohen's kappa
<https://en.wikipedia.org/wiki/Cohen%27s_kappa>`_.
"""
confusion = confusion_matrix(y1, y2, labels=labels,
sample_weight=sample_weight)
n_classes = confusion.shape[0]
sum0 = np.sum(confusion, axis=0)
sum1 = np.sum(confusion, axis=1)
expected = np.outer(sum0, sum1) / np.sum(sum0)
if weights is None:
w_mat = np.ones([n_classes, n_classes], dtype=int)
w_mat.flat[:: n_classes + 1] = 0
elif weights == "linear" or weights == "quadratic":
w_mat = np.zeros([n_classes, n_classes], dtype=int)
w_mat += np.arange(n_classes)
if weights == "linear":
w_mat = np.abs(w_mat - w_mat.T)
else:
w_mat = (w_mat - w_mat.T) ** 2
else:
raise ValueError("Unknown kappa weighting type.")
k = np.sum(w_mat * confusion) / np.sum(w_mat * expected)
return 1 - k
@_deprecate_positional_args
def jaccard_score(y_true, y_pred, *, labels=None, pos_label=1,
average='binary', sample_weight=None, zero_division="warn"):
"""Jaccard similarity coefficient score.
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
labels : array-like of shape (n_classes,), default=None
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, default=1
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : {None, 'micro', 'macro', 'samples', 'weighted', \
'binary'}, default='binary'
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", {0.0, 1.0}, default="warn"
Sets the value to return when there is a zero division, i.e. when there
are no negative values in predictions and labels. If set to
"warn", this acts like 0, but a warning is also raised.
Returns
-------
score : float (if average is not None) or array of floats, shape =\
[n_unique_labels]
See Also
--------
accuracy_score, f_score, multilabel_confusion_matrix
Notes
-----
:func:`jaccard_score` may be a poor metric if there are no
positives for some samples or classes. Jaccard is undefined if there are
no true or predicted labels, and our implementation will return a score
of 0 with a warning.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<https://en.wikipedia.org/wiki/Jaccard_index>`_.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_score
>>> y_true = np.array([[0, 1, 1],
... [1, 1, 0]])
>>> y_pred = np.array([[1, 1, 1],
... [1, 0, 0]])
In the binary case:
>>> jaccard_score(y_true[0], y_pred[0])
0.6666...
In the multilabel case:
>>> jaccard_score(y_true, y_pred, average='samples')
0.5833...
>>> jaccard_score(y_true, y_pred, average='macro')
0.6666...
>>> jaccard_score(y_true, y_pred, average=None)
array([0.5, 0.5, 1. ])
In the multiclass case:
>>> y_pred = [0, 2, 1, 2]
>>> y_true = [0, 1, 2, 2]
>>> jaccard_score(y_true, y_pred, average=None)
array([1. , 0. , 0.33...])
"""
labels = _check_set_wise_labels(y_true, y_pred, average, labels,
pos_label)
samplewise = average == 'samples'
MCM = multilabel_confusion_matrix(y_true, y_pred,
sample_weight=sample_weight,
labels=labels, samplewise=samplewise)
numerator = MCM[:, 1, 1]
denominator = MCM[:, 1, 1] + MCM[:, 0, 1] + MCM[:, 1, 0]
if average == 'micro':
numerator = np.array([numerator.sum()])
denominator = np.array([denominator.sum()])
jaccard = _prf_divide(numerator, denominator, 'jaccard',
'true or predicted', average, ('jaccard',),
zero_division=zero_division)
if average is None:
return jaccard
if average == 'weighted':
weights = MCM[:, 1, 0] + MCM[:, 1, 1]
if not np.any(weights):
# numerator is 0, and warning should have already been issued
weights = None
elif average == 'samples' and sample_weight is not None:
weights = sample_weight
else:
weights = None
return np.average(jaccard, weights=weights)
@_deprecate_positional_args
def matthews_corrcoef(y_true, y_pred, *, sample_weight=None):
"""Compute the Matthews correlation coefficient (MCC).
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Binary and multiclass labels are supported. Only in the binary case does
this relate to information about true and false positives and negatives.
See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
.. versionadded:: 0.18
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<https://doi.org/10.1093/bioinformatics/16.5.412>`_.
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_.
.. [3] `Gorodkin, (2004). Comparing two K-category assignments by a
K-category correlation coefficient
<https://www.sciencedirect.com/science/article/pii/S1476927104000799>`_.
.. [4] `Jurman, Riccadonna, Furlanello, (2012). A Comparison of MCC and CEN
Error Measures in MultiClass Prediction
<https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0041882>`_.
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred)
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
check_consistent_length(y_true, y_pred, sample_weight)
if y_type not in {"binary", "multiclass"}:
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
t_sum = C.sum(axis=1, dtype=np.float64)
p_sum = C.sum(axis=0, dtype=np.float64)
n_correct = np.trace(C, dtype=np.float64)
n_samples = p_sum.sum()
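# Covariance-style formulation of MCC from the confusion matrix (Gorodkin,
# 2004, reference [3] above): MCC = cov(y_true, y_pred) /
# sqrt(cov(y_true, y_true) * cov(y_pred, y_pred)).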
cov_ytyp = n_correct * n_samples - np.dot(t_sum, p_sum)
cov_ypyp = n_samples ** 2 - np.dot(p_sum, p_sum)
cov_ytyt = n_samples ** 2 - np.dot(t_sum, t_sum)
mcc = cov_ytyp / np.sqrt(cov_ytyt * cov_ypyp)
if np.isnan(mcc):
return 0.
else:
return mcc
@_deprecate_positional_args
def zero_one_loss(y_true, y_pred, *, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float); otherwise, return the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, default=True
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
loss : float or int
If ``normalize == True``, return the fraction of misclassifications
(float); otherwise, return the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See Also
--------
accuracy_score, hamming_loss, jaccard_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> import numpy as np
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
@_deprecate_positional_args
def f1_score(y_true, y_pred, *, labels=None, pos_label=1, average='binary',
sample_weight=None, zero_division="warn"):
"""Compute the F1 score, also known as balanced F-score or F-measure.
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the average of
the F1 score of each class with weighting depending on the ``average``
parameter.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array-like, default=None
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
Parameter `labels` improved for multiclass problem.
pos_label : str or int, default=1
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : {'micro', 'macro', 'samples','weighted', 'binary'} or None, \
default='binary'
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division, i.e. when all
predictions and labels are negative. If set to "warn", this acts as 0,
but warnings are also raised.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
See Also
--------
fbeta_score, precision_recall_fscore_support, jaccard_score,
multilabel_confusion_matrix
References
----------
.. [1] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_.
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro')
0.26...
>>> f1_score(y_true, y_pred, average='micro')
0.33...
>>> f1_score(y_true, y_pred, average='weighted')
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([0.8, 0. , 0. ])
>>> y_true = [0, 0, 0, 0, 0, 0]
>>> y_pred = [0, 0, 0, 0, 0, 0]
>>> f1_score(y_true, y_pred, zero_division=1)
1.0...
Notes
-----
When ``true positive + false positive == 0``, precision is undefined.
When ``true positive + false negative == 0``, recall is undefined.
In such cases, by default the metric will be set to 0, as will f-score,
and ``UndefinedMetricWarning`` will be raised. This behavior can be
modified with ``zero_division``.
"""
return fbeta_score(y_true, y_pred, beta=1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight,
zero_division=zero_division)
@_deprecate_positional_args
def fbeta_score(y_true, y_pred, *, beta, labels=None, pos_label=1,
average='binary', sample_weight=None, zero_division="warn"):
"""Compute the F-beta score.
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of recall in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> +inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float
Determines the weight of recall in the combined score.
labels : array-like, default=None
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
Parameter `labels` improved for multiclass problem.
pos_label : str or int, default=1
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : {'micro', 'macro', 'samples', 'weighted', 'binary'} or None \
default='binary'
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division, i.e. when all
predictions and labels are negative. If set to "warn", this acts as 0,
but warnings are also raised.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
See Also
--------
precision_recall_fscore_support, multilabel_confusion_matrix
Notes
-----
When ``true positive + false positive == 0`` or
``true positive + false negative == 0``, f-score returns 0 and raises
``UndefinedMetricWarning``. This behavior can be
modified with ``zero_division``.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_.
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
array([0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight,
zero_division=zero_division)
return f
def _prf_divide(numerator, denominator, metric,
modifier, average, warn_for, zero_division="warn"):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements equal to
0 or 1 (according to ``zero_division``). Plus, if
``zero_division != "warn"`` raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
mask = denominator == 0.0
denominator = denominator.copy()
denominator[mask] = 1 # avoid infs/nans
result = numerator / denominator
if not np.any(mask):
return result
# if ``zero_division=1``, set those with denominator == 0 equal to 1
result[mask] = 0.0 if zero_division in ["warn", 0] else 1.0
# The user is opting out of warnings when zero_division is set to anything
# other than its default value. If we are computing only the f-score, the
# warning will be raised only when both precision and recall are ill-defined.
if zero_division != "warn" or metric not in warn_for:
return result
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples. Use ``zero_division`` parameter to
# control this behavior."
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
_warn_prf(average, modifier, msg_start, len(result))
return result
def _warn_prf(average, modifier, msg_start, result_size):
axis0, axis1 = 'sample', 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s. Use `zero_division` parameter to control'
' this behavior.'.format(msg_start, modifier, axis0))
if result_size == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
def _check_set_wise_labels(y_true, y_pred, average, labels, pos_label):
"""Validation associated with set-wise metrics.
Returns identified labels.
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
# Convert to Python primitive type to avoid NumPy type / Python str
# comparison. See https://github.com/numpy/numpy/issues/6784
present_labels = unique_labels(y_true, y_pred).tolist()
if average == 'binary':
if y_type == 'binary':
if pos_label not in present_labels:
if len(present_labels) >= 2:
raise ValueError(
f"pos_label={pos_label} is not a valid label. It "
f"should be one of {present_labels}"
)
labels = [pos_label]
else:
average_options = list(average_options)
if y_type == 'multiclass':
average_options.remove('samples')
raise ValueError("Target is %s but average='binary'. Please "
"choose another average setting, one of %r."
% (y_type, average_options))
elif pos_label not in (None, 1):
warnings.warn("Note that pos_label (set to %r) is ignored when "
"average != 'binary' (got %r). You may use "
"labels=[pos_label] to specify a single positive class."
% (pos_label, average), UserWarning)
return labels
@_deprecate_positional_args
def precision_recall_fscore_support(y_true, y_pred, *, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None,
zero_division="warn"):
"""Compute precision, recall, F-measure and support for each class.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, default=1.0
The strength of recall versus precision in the F-score.
labels : array-like, default=None
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, default=1
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : {'binary', 'micro', 'macro', 'samples','weighted'}, \
default=None
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division:
- recall: when there are no positive labels
- precision: when there are no positive predictions
- f-score: both
If set to "warn", this acts as 0, but warnings are also raised.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
support : None (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
Notes
-----
When ``true positive + false positive == 0``, precision is undefined.
When ``true positive + false negative == 0``, recall is undefined.
In such cases, by default the metric will be set to 0, as will f-score,
and ``UndefinedMetricWarning`` will be raised. This behavior can be
modified with ``zero_division``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<https://en.wikipedia.org/wiki/Precision_and_recall>`_.
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_.
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
(array([0. , 0. , 0.66...]),
array([0., 0., 1.]), array([0. , 0. , 0.8]),
array([2, 2, 2]))
"""
_check_zero_division(zero_division)
if beta < 0:
raise ValueError("beta should be >=0 in the F-beta score")
labels = _check_set_wise_labels(y_true, y_pred, average, labels,
pos_label)
# Calculate tp_sum, pred_sum, true_sum ###
samplewise = average == 'samples'
MCM = multilabel_confusion_matrix(y_true, y_pred,
sample_weight=sample_weight,
labels=labels, samplewise=samplewise)
tp_sum = MCM[:, 1, 1]
pred_sum = tp_sum + MCM[:, 0, 1]
true_sum = tp_sum + MCM[:, 1, 0]
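# With the per-label 2x2 matrices, pred_sum is the number of predicted
# positives (TP + FP) and true_sum the number of actual positives (TP + FN),
# i.e. the support; precision and recall below are tp_sum / pred_sum and
# tp_sum / true_sum.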
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
beta2 = beta ** 2
# Divide, and on zero-division, set scores and/or warn according to
# zero_division:
precision = _prf_divide(tp_sum, pred_sum, 'precision',
'predicted', average, warn_for, zero_division)
recall = _prf_divide(tp_sum, true_sum, 'recall',
'true', average, warn_for, zero_division)
# warn for f-score only if zero_division is warn, it is in warn_for
# and BOTH prec and rec are ill-defined
if zero_division == "warn" and ("f-score",) == warn_for:
if (pred_sum[true_sum == 0] == 0).any():
_warn_prf(
average, "true nor predicted", 'F-score is', len(true_sum)
)
# if tp == 0, F will be 1 only if all predictions are zero, all labels are
# zero, and zero_division=1. In all other cases it is 0.
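# Closed form used below: F_beta = (1 + beta**2) * precision * recall /
# (beta**2 * precision + recall); where the denominator is zero it is clamped
# to 1, so those entries get an F-score of 0.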
if np.isposinf(beta):
f_score = recall
else:
denom = beta2 * precision + recall
denom[denom == 0.] = 1 # avoid division by 0
f_score = (1 + beta2) * precision * recall / denom
# Average the results
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
zero_division_value = np.float64(1.0)
if zero_division in ["warn", 0]:
zero_division_value = np.float64(0.0)
# precision is zero_division if there are no positive predictions
# recall is zero_division if there are no positive labels
# fscore is zero_division if all labels AND predictions are
# negative
if pred_sum.sum() == 0:
return (zero_division_value,
zero_division_value,
zero_division_value,
None)
else:
return (np.float64(0.0),
zero_division_value,
np.float64(0.0),
None)
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
@_deprecate_positional_args
def precision_score(y_true, y_pred, *, labels=None, pos_label=1,
average='binary', sample_weight=None,
zero_division="warn"):
"""Compute the precision.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array-like, default=None
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
Parameter `labels` improved for multiclass problem.
pos_label : str or int, default=1
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : {'micro', 'macro', 'samples', 'weighted', 'binary'} \
default='binary'
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division. If set to
"warn", this acts as 0, but warnings are also raised.
Returns
-------
precision : float (if average is not None) or array of float of shape
(n_unique_labels,)
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
See Also
--------
precision_recall_fscore_support, multilabel_confusion_matrix
Notes
-----
When ``true positive + false positive == 0``, precision returns 0 and
raises ``UndefinedMetricWarning``. This behavior can be
modified with ``zero_division``.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro')
0.22...
>>> precision_score(y_true, y_pred, average='micro')
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
0.22...
>>> precision_score(y_true, y_pred, average=None)
array([0.66..., 0. , 0. ])
>>> y_pred = [0, 0, 0, 0, 0, 0]
>>> precision_score(y_true, y_pred, average=None)
array([0.33..., 0. , 0. ])
>>> precision_score(y_true, y_pred, average=None, zero_division=1)
array([0.33..., 1. , 1. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight,
zero_division=zero_division)
return p
@_deprecate_positional_args
def recall_score(y_true, y_pred, *, labels=None, pos_label=1, average='binary',
sample_weight=None, zero_division="warn"):
"""Compute the recall.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array-like, default=None
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
Parameter `labels` improved for multiclass problem.
pos_label : str or int, default=1
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : {'micro', 'macro', 'samples', 'weighted', 'binary'} \
default='binary'
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division. If set to
"warn", this acts as 0, but warnings are also raised.
Returns
-------
recall : float (if average is not None) or array of float of shape
(n_unique_labels,)
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
See Also
--------
precision_recall_fscore_support, balanced_accuracy_score,
multilabel_confusion_matrix
Notes
-----
When ``true positive + false negative == 0``, recall returns 0 and raises
``UndefinedMetricWarning``. This behavior can be modified with
``zero_division``.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro')
0.33...
>>> recall_score(y_true, y_pred, average='micro')
0.33...
>>> recall_score(y_true, y_pred, average='weighted')
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([1., 0., 0.])
>>> y_true = [0, 0, 0, 0, 0, 0]
>>> recall_score(y_true, y_pred, average=None)
array([0.5, 0. , 0. ])
>>> recall_score(y_true, y_pred, average=None, zero_division=1)
array([0.5, 1. , 1. ])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight,
zero_division=zero_division)
return r
@_deprecate_positional_args
def balanced_accuracy_score(y_true, y_pred, *, sample_weight=None,
adjusted=False):
"""Compute the balanced accuracy.
The balanced accuracy in binary and multiclass classification problems is
used to deal with imbalanced datasets. It is defined as the average of recall
obtained on each class.
The best value is 1 and the worst value is 0 when ``adjusted=False``.
Read more in the :ref:`User Guide <balanced_accuracy_score>`.
.. versionadded:: 0.20
Parameters
----------
y_true : 1d array-like
Ground truth (correct) target values.
y_pred : 1d array-like
Estimated targets as returned by a classifier.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
adjusted : bool, default=False
When true, the result is adjusted for chance, so that random
performance would score 0, while keeping perfect performance at a score
of 1.
Returns
-------
balanced_accuracy : float
See Also
--------
recall_score, roc_auc_score
Notes
-----
Some literature promotes alternative definitions of balanced accuracy. Our
definition is equivalent to :func:`accuracy_score` with class-balanced
sample weights, and shares desirable properties with the binary case.
See the :ref:`User Guide <balanced_accuracy_score>`.
References
----------
.. [1] Brodersen, K.H.; Ong, C.S.; Stephan, K.E.; Buhmann, J.M. (2010).
The balanced accuracy and its posterior distribution.
Proceedings of the 20th International Conference on Pattern
Recognition, 3121-24.
.. [2] John. D. Kelleher, Brian Mac Namee, Aoife D'Arcy, (2015).
`Fundamentals of Machine Learning for Predictive Data Analytics:
Algorithms, Worked Examples, and Case Studies
<https://mitpress.mit.edu/books/fundamentals-machine-learning-predictive-data-analytics>`_.
Examples
--------
>>> from sklearn.metrics import balanced_accuracy_score
>>> y_true = [0, 1, 0, 0, 1, 0]
>>> y_pred = [0, 1, 0, 0, 0, 1]
>>> balanced_accuracy_score(y_true, y_pred)
0.625
"""
C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
with np.errstate(divide='ignore', invalid='ignore'):
per_class = np.diag(C) / C.sum(axis=1)
if np.any(np.isnan(per_class)):
warnings.warn('y_pred contains classes not in y_true')
per_class = per_class[~np.isnan(per_class)]
score = np.mean(per_class)
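# With adjusted=True, rescale so that chance-level performance (a recall of
# 1 / n_classes for every class) maps to 0 while a perfect score stays at 1:
# (score - chance) / (1 - chance).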
if adjusted:
n_classes = len(per_class)
chance = 1 / n_classes
score -= chance
score /= 1 - chance
return score
@_deprecate_positional_args
def classification_report(y_true, y_pred, *, labels=None, target_names=None,
sample_weight=None, digits=2, output_dict=False,
zero_division="warn"):
"""Build a text report showing the main classification metrics.
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array-like of shape (n_labels,), default=None
Optional list of label indices to include in the report.
target_names : list of str of shape (n_labels,), default=None
Optional display names matching the labels (same order).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
digits : int, default=2
Number of digits for formatting output floating point values.
When ``output_dict`` is ``True``, this will be ignored and the
returned values will not be rounded.
output_dict : bool, default=False
If True, return output as dict.
.. versionadded:: 0.20
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division. If set to
"warn", this acts as 0, but warnings are also raised.
Returns
-------
report : string / dict
Text summary of the precision, recall, F1 score for each class.
Dictionary returned if output_dict is True. Dictionary has the
following structure::
{'label 1': {'precision':0.5,
'recall':1.0,
'f1-score':0.67,
'support':1},
'label 2': { ... },
...
}
The reported averages include macro average (averaging the unweighted
mean per label), weighted average (averaging the support-weighted mean
per label), and sample average (only for multilabel classification).
Micro average (averaging the total true positives, false negatives and
false positives) is only shown for multi-label or multi-class
with a subset of classes, because it corresponds to accuracy
otherwise and would be the same for all metrics.
See also :func:`precision_recall_fscore_support` for more details
on averages.
Note that in binary classification, recall of the positive class
is also known as "sensitivity"; recall of the negative class is
"specificity".
See Also
--------
precision_recall_fscore_support, confusion_matrix,
multilabel_confusion_matrix
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
accuracy 0.60 5
macro avg 0.50 0.56 0.49 5
weighted avg 0.70 0.60 0.61 5
<BLANKLINE>
>>> y_pred = [1, 1, 0]
>>> y_true = [1, 1, 1]
>>> print(classification_report(y_true, y_pred, labels=[1, 2, 3]))
precision recall f1-score support
<BLANKLINE>
1 1.00 0.67 0.80 3
2 0.00 0.00 0.00 0
3 0.00 0.00 0.00 0
<BLANKLINE>
micro avg 1.00 0.67 0.80 3
macro avg 0.33 0.22 0.27 3
weighted avg 1.00 0.67 0.80 3
<BLANKLINE>
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if labels is None:
labels = unique_labels(y_true, y_pred)
labels_given = False
else:
labels = np.asarray(labels)
labels_given = True
# labelled micro average
micro_is_accuracy = ((y_type == 'multiclass' or y_type == 'binary') and
(not labels_given or
(set(labels) == set(unique_labels(y_true, y_pred)))))
if target_names is not None and len(labels) != len(target_names):
if labels_given:
warnings.warn(
"labels size, {0}, does not match size of target_names, {1}"
.format(len(labels), len(target_names))
)
else:
raise ValueError(
"Number of classes, {0}, does not match size of "
"target_names, {1}. Try specifying the labels "
"parameter".format(len(labels), len(target_names))
)
if target_names is None:
target_names = ['%s' % l for l in labels]
headers = ["precision", "recall", "f1-score", "support"]
# compute per-class results without averaging
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight,
zero_division=zero_division)
rows = zip(target_names, p, r, f1, s)
if y_type.startswith('multilabel'):
average_options = ('micro', 'macro', 'weighted', 'samples')
else:
average_options = ('micro', 'macro', 'weighted')
if output_dict:
report_dict = {label[0]: label[1:] for label in rows}
for label, scores in report_dict.items():
report_dict[label] = dict(zip(headers,
[i.item() for i in scores]))
else:
longest_last_line_heading = 'weighted avg'
name_width = max(len(cn) for cn in target_names)
width = max(name_width, len(longest_last_line_heading), digits)
head_fmt = '{:>{width}s} ' + ' {:>9}' * len(headers)
report = head_fmt.format('', *headers, width=width)
report += '\n\n'
row_fmt = '{:>{width}s} ' + ' {:>9.{digits}f}' * 3 + ' {:>9}\n'
for row in rows:
report += row_fmt.format(*row, width=width, digits=digits)
report += '\n'
# compute all applicable averages
for average in average_options:
if average.startswith('micro') and micro_is_accuracy:
line_heading = 'accuracy'
else:
line_heading = average + ' avg'
# compute averages with specified averaging method
avg_p, avg_r, avg_f1, _ = precision_recall_fscore_support(
y_true, y_pred, labels=labels,
average=average, sample_weight=sample_weight,
zero_division=zero_division)
avg = [avg_p, avg_r, avg_f1, np.sum(s)]
if output_dict:
report_dict[line_heading] = dict(
zip(headers, [i.item() for i in avg]))
else:
if line_heading == 'accuracy':
row_fmt_accuracy = '{:>{width}s} ' + \
' {:>9.{digits}}' * 2 + ' {:>9.{digits}f}' + \
' {:>9}\n'
report += row_fmt_accuracy.format(line_heading, '', '',
*avg[2:], width=width,
digits=digits)
else:
report += row_fmt.format(line_heading, *avg,
width=width, digits=digits)
if output_dict:
if 'accuracy' in report_dict.keys():
report_dict['accuracy'] = report_dict['accuracy']['precision']
return report_dict
else:
return report
@_deprecate_positional_args
def hamming_loss(y_true, y_pred, *, sample_weight=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
.. versionadded:: 0.18
Returns
-------
loss : float or int
Return the average Hamming loss between the elements of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_score, zero_one_loss
Notes
-----
In multiclass classification, the Hamming loss corresponds to the Hamming
distance between ``y_true`` and ``y_pred`` which is equivalent to the
subset ``zero_one_loss`` function, when `normalize` parameter is set to
True.
In multilabel classification, the Hamming loss is different from the
subset zero-one loss. The zero-one loss considers the entire set of labels
for a given sample incorrect if it does not entirely match the true set of
labels. Hamming loss is more forgiving in that it penalizes only the
individual labels.
The Hamming loss is upper bounded by the subset zero-one loss, when the
`normalize` parameter is set to True. It is always between 0 and 1,
lower being better.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<https://en.wikipedia.org/wiki/Hamming_distance>`_.
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> import numpy as np
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
check_consistent_length(y_true, y_pred, sample_weight)
if sample_weight is None:
weight_average = 1.
else:
weight_average = np.mean(sample_weight)
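# For multilabel targets the loss is the fraction of individual label
# entries that differ, n_differences / (n_samples * n_labels), with sample
# weights folded in through weight_average.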
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred,
sample_weight=sample_weight)
return (n_differences /
(y_true.shape[0] * y_true.shape[1] * weight_average))
elif y_type in ["binary", "multiclass"]:
return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)
else:
raise ValueError("{0} is not supported".format(y_type))
@_deprecate_positional_args
def log_loss(y_true, y_pred, *, eps=1e-15, normalize=True, sample_weight=None,
labels=None):
r"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of a logistic model that returns ``y_pred`` probabilities
for its training data ``y_true``.
The log loss is only defined for two or more labels.
For a single sample with true label :math:`y \in \{0,1\}` and
and a probability estimate :math:`p = \operatorname{Pr}(y = 1)`, the log
loss is:
.. math::
L_{\log}(y, p) = -(y \log (p) + (1 - y) \log (1 - p))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,)
Predicted probabilities, as returned by a classifier's
predict_proba method. If ``y_pred.shape = (n_samples,)``
the probabilities provided are assumed to be those of the
positive class. The labels in ``y_pred`` are assumed to be
ordered alphabetically, as done by
:class:`preprocessing.LabelBinarizer`.
eps : float, default=1e-15
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, default=True
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
labels : array-like, default=None
If not provided, labels will be inferred from y_true. If ``labels``
is ``None`` and ``y_pred`` has shape (n_samples,) the labels are
assumed to be binary and are inferred from ``y_true``.
.. versionadded:: 0.18
Returns
-------
loss : float
Notes
-----
The logarithm used is the natural logarithm (base-e).
Examples
--------
>>> from sklearn.metrics import log_loss
>>> log_loss(["spam", "ham", "ham", "spam"],
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
"""
y_pred = check_array(y_pred, ensure_2d=False)
check_consistent_length(y_pred, y_true, sample_weight)
lb = LabelBinarizer()
if labels is not None:
lb.fit(labels)
else:
lb.fit(y_true)
if len(lb.classes_) == 1:
if labels is None:
raise ValueError('y_true contains only one label ({0}). Please '
'provide the true labels explicitly through the '
'labels argument.'.format(lb.classes_[0]))
else:
raise ValueError('The labels array needs to contain at least two '
'labels for log_loss, '
'got {0}.'.format(lb.classes_))
transformed_labels = lb.transform(y_true)
if transformed_labels.shape[1] == 1:
transformed_labels = np.append(1 - transformed_labels,
transformed_labels, axis=1)
# Clipping
y_pred = np.clip(y_pred, eps, 1 - eps)
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if y_pred.ndim == 1:
y_pred = y_pred[:, np.newaxis]
if y_pred.shape[1] == 1:
y_pred = np.append(1 - y_pred, y_pred, axis=1)
# Check if dimensions are consistent.
transformed_labels = check_array(transformed_labels)
if len(lb.classes_) != y_pred.shape[1]:
if labels is None:
raise ValueError("y_true and y_pred contain different number of "
"classes {0}, {1}. Please provide the true "
"labels explicitly through the labels argument. "
"Classes found in "
"y_true: {2}".format(transformed_labels.shape[1],
y_pred.shape[1],
lb.classes_))
else:
raise ValueError('The number of classes in labels is different '
'from that in y_pred. Classes found in '
'labels: {0}'.format(lb.classes_))
# Renormalize
y_pred /= y_pred.sum(axis=1)[:, np.newaxis]
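# Per-sample cross-entropy: loss_i = -sum_k y_ik * log(p_ik), where y_ik is
# the binarized true label and p_ik the clipped, renormalized predicted
# probability; _weighted_sum then averages (or sums) over samples.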
loss = -(transformed_labels * np.log(y_pred)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
@_deprecate_positional_args
def hinge_loss(y_true, pred_decision, *, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized).
In the binary case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
In the multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
contains all the labels. The multilabel margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array of shape (n_samples,)
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array of shape (n_samples,) or (n_samples, n_classes)
Predicted decisions, as output by decision_function (floats).
labels : array-like, default=None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<https://en.wikipedia.org/wiki/Hinge_loss>`_.
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292.
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(random_state=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision)
0.30...
In the multiclass case:
>>> import numpy as np
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC()
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels=labels)
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(labels if labels is not None else y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
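# Crammer-Singer multiclass margin: decision value of the true class minus
# the largest decision value among the remaining classes.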
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
np.clip(losses, 0, None, out=losses)
return np.average(losses, weights=sample_weight)
@_deprecate_positional_args
def brier_score_loss(y_true, y_prob, *, sample_weight=None, pos_label=None):
"""Compute the Brier score loss.
The smaller the Brier score loss, the better, hence the naming with "loss".
The Brier score measures the mean squared difference between the predicted
probability and the actual outcome. The Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1). It can be decomposed as the sum of refinement loss and
calibration loss.
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter `pos_label`, which defaults to
the greater label unless `y_true` is all 0 or all -1, in which case
`pos_label` defaults to 1.
Read more in the :ref:`User Guide <brier_score_loss>`.
Parameters
----------
y_true : array of shape (n_samples,)
True targets.
y_prob : array of shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
pos_label : int or str, default=None
Label of the positive class. `pos_label` will be inferred in the
following manner:
* if `y_true` in {-1, 1} or {0, 1}, `pos_label` defaults to 1;
* else if `y_true` contains strings, an error will be raised and
`pos_label` should be explicitly specified;
* otherwise, `pos_label` defaults to the greater label,
i.e. `np.unique(y_true)[-1]`.
Returns
-------
score : float
Brier score loss.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob)
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0)
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, pos_label="ham")
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
.. [1] `Wikipedia entry for the Brier score
<https://en.wikipedia.org/wiki/Brier_score>`_.
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
assert_all_finite(y_true)
assert_all_finite(y_prob)
check_consistent_length(y_true, y_prob, sample_weight)
y_type = type_of_target(y_true)
if y_type != "binary":
raise ValueError(
f"Only binary classification is supported. The type of the target "
f"is {y_type}."
)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
try:
pos_label = _check_pos_label_consistency(pos_label, y_true)
except ValueError:
classes = np.unique(y_true)
if classes.dtype.kind not in ('O', 'U', 'S'):
# for backward compatibility, if classes are not string then
# `pos_label` will correspond to the greater label
pos_label = classes[-1]
else:
raise
y_true = np.array(y_true == pos_label, int)
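# Brier score: mean squared difference between the encoded binary outcome
# (1 for pos_label, 0 otherwise) and the predicted probability.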
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
| anntzer/scikit-learn | sklearn/metrics/_classification.py | Python | bsd-3-clause | 97,201 | ["Brian"] | 0c6248cd4e072eeaea7a933ee870985f89f1d407967760792e313f38b5da6538 |
from mumax2 import *
# Standard Problem 4
# define geometry
# number of cells
Nx = 32
Ny = 32
Nz = 32
setgridsize(Nx, Ny, Nz)
# physical size in meters
sizeX = 160e-9
sizeY = 160e-9
sizeZ = 160e-9
setcellsize(sizeX/Nx, sizeY/Ny, sizeZ/Nz)
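# -> cubic cells of 5 nm x 5 nm x 5 nm (160 nm / 32)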
# load modules
load('exchange6')
load('demag')
load('zeeman')
load('llg')
load('maxtorque')
load('solver/am12')
setv('m_maxabserror', 1e-4)
setv('m_maxrelerror', 1e-4)
setv('maxdt', 1e-10)
setv('mindt', 1e-17)
# set parameters
setv('Msat', 800e3)
setv('Aex', 1.3e-11)
setv('alpha', 0.02)
setv('dt', 1e-12) # initial time step, will adapt
# set magnetization
m=[ [[[1]]], [[[1]]], [[[0]]] ]
setarray('m', m)
#relax
setv('alpha', 1) # high damping for relax
autotabulate(["t", "maxtorque"], "t.dat", 1e-13)
run_until_smaller('maxtorque', 1e-3 * gets('gamma') * gets('msat'))
setv('alpha', 0.02) # restore normal damping
setv('t', 0) # re-set time to 0 so output starts at 0
setv('dt', 1e-15) # restore time step, will adapt again
save("m","vtk",[])
# schedule some output
# save magnetization snapshots in OMF text format every 20ps
# autosave("m", "gplot", [], 1e-12)
# save a table with time and the average magnetization every 10ps
autotabulate(["t", "<m>"], "m.txt", 10e-12)
# run with field
Bx = -24.6E-3
By = 4.3E-3
Bz = 0
setv('B_ext', [Bx, By, Bz])
autotabulate(["t", "m_error"], "error.dat", 1e-13)
run(1e-9)
# some debug output
printstats()
savegraph("graph.png") # see stdprobl4.py.out/graph.dot.png
sync()
| mumax/2 | tests/exch-bench.py | Python | gpl-3.0 | 1,501 | ["VTK"] | 491642f12e1b08d3f7cbd3085cb37bdc08e5f0ffbe615c18c79d1af3b9396ea7 |
import random
from bot import Command, utils, categories
url_settings = {
'cat': ['http://aws.random.cat/meow', ['gato', 'gatito', 'neko'], 'file'],
'dog': ['https://dog.ceo/api/breeds/image/random', ['perro', 'perrito', 'doggo'], 'message'],
'shiba': ['http://shibe.online/api/shibes', ['shibe', 'shibainu'], 0],
'fox': ['https://randomfox.ca/floof/', ['foxxo'], 'image'],
'duck': ['https://random-d.uk/api/random', ['pato'], 'url'],
'bunny': ['https://api.bunnies.io/v2/loop/random/?media=gif', ['conejo'], 'media.gif'],
'owl': ['http://pics.floofybot.moe/owl', ['buho'], 'image'],
}
alias_map = {k: v[1] + [k] for k, v in url_settings.items()}
aliases = [item for x in alias_map.values() for item in x]
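# alias_map maps each animal type to all of its accepted aliases (including
# the type name itself); `aliases` is the flat list used for command
# registration and lookup in handle() below.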
class RandomAnimal(Command):
__author__ = 'makzk'
__version__ = '1.0.0'
def __init__(self, bot):
super().__init__(bot)
self.name = 'animal'
self.format = '$[animal-usage]'
self.aliases = aliases
self.category = categories.IMAGES
async def handle(self, cmd):
if cmd.argc < 1 and cmd.cmdname in aliases:
cmd.args = [cmd.cmdname]
cmd.argc = 1
if cmd.argc > 0 and cmd.args[0] == 'help':
return await cmd.send_usage('$[animal-usage-help]', locales={'types': ', '.join(list(url_settings.keys()))})
if cmd.argc < 1:
atype = random.choice(list(url_settings.keys()))
elif cmd.args[0] in aliases:
atype = list(url_settings.keys())[0]
for k, v in alias_map.items():
if cmd.args[0] in v:
atype = k
break
else:
return await cmd.send_usage()
try:
config = url_settings[atype]
await cmd.typing()
async with self.http.get(config[0]) as r:
if r.status == 200:
data = await r.json()
if isinstance(config[2], int):
data = data[config[2]]
else:
for prop in config[2].split('.'):
data = data.get(prop, '')
embed = utils.img_embed(data, f'$[animal-{atype}-title]')
return await cmd.answer(embed)
except Exception as e:
self.log.error(e)
await cmd.answer(f'$[animal-{atype}-error]')
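# A minimal sketch of how the third element of each `url_settings` entry is
# interpreted in handle() above: an int indexes a JSON list response, while a
# dotted string walks nested keys. `_extract_image_url` is an illustrative
# helper name, not part of the bot framework.
def _extract_image_url(data, selector):
    if isinstance(selector, int):
        return data[selector]
    for prop in selector.split('.'):
        data = data.get(prop, '')
    return data

# e.g. _extract_image_url({'media': {'gif': 'https://example/x.gif'}}, 'media.gif')
# returns the gif URL, matching the 'bunny' entry above.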
|
jvicu2001/alexis-bot
|
modules/animals.py
|
Python
|
mit
| 2,408
|
[
"MOE"
] |
0fb0a4166779abcf09115e259199474abd5dcc5e36ea0ba76d8fd454b4ce7c77
|
"""
VolumeVisualizationRamp
:Authors:
Berend Klein Haneveld
"""
from VolumeVisualization import VolumeVisualization
from VolumeVisualization import VisualizationTypeRamp
from vtk import vtkVolumeProperty
from vtk import vtkColorTransferFunction
from vtk import vtkPiecewiseFunction
from PySide.QtGui import QWidget
from PySide.QtGui import QGridLayout
from PySide.QtCore import Qt
from core.decorators import overrides
class VolumeVisualizationRamp(VolumeVisualization):
"""VolumeVisualizationRamp is a volume property that just maps values
on a ramp from 0 to 1."""
def __init__(self):
super(VolumeVisualizationRamp, self).__init__()
self.visualizationType = VisualizationTypeRamp
self.volProp = vtkVolumeProperty()
self.volProp.SetIndependentComponents(True)
self.volProp.SetInterpolationTypeToLinear()
self.volProp.ShadeOn()
self.volProp.SetAmbient(0.1)
self.volProp.SetDiffuse(0.9)
self.volProp.SetSpecular(0.2)
self.volProp.SetSpecularPower(10.0)
self.volProp.SetScalarOpacityUnitDistance(0.8919)
@overrides(VolumeVisualization)
def setMapper(self, mapper):
pass
@overrides(VolumeVisualization)
def shaderType(self):
return 0
@overrides(VolumeVisualization)
def updateTransferFunction(self):
# Transfer function and property
self.colorFunction = vtkColorTransferFunction()
self.colorFunction.AddRGBSegment(self.minimum, 0.0, 0.0, 0.0, self.maximum, 1.0, 1.0, 1.0)
self.opacityFunction = vtkPiecewiseFunction()
self.opacityFunction.AddSegment(self.minimum, 0.0, self.maximum, 1.0)
self.volProp.SetColor(self.colorFunction)
self.volProp.SetScalarOpacity(self.opacityFunction)
self.updatedTransferFunction.emit()
@overrides(VolumeVisualization)
def setImageData(self, imageData):
"""
Nothing needs to be done for CT scans. The values of the sliders are
not dependent on the imageData.
:type imageData: vtkImageData
"""
self.minimum, self.maximum = imageData.GetScalarRange()
@overrides(VolumeVisualization)
def getParameterWidget(self):
"""
Returns a widget with sliders / fields with which properties of this
volume property can be adjusted.
:rtype: QWidget
"""
layout = QGridLayout()
layout.setAlignment(Qt.AlignTop)
widget = QWidget()
widget.setLayout(layout)
return widget
@overrides(VolumeVisualization)
def valueChanged(self, value):
"""
Parameter 'value' is unused. This is a callback for all the
interactive widgets in the parameter widget.
"""
self.updateTransferFunction()
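# A minimal usage sketch, assuming `imageData` is a vtkImageData with a valid
# scalar range (loaded elsewhere in the application); the variable names are
# illustrative and the snippet is not executed by this module.
#
#   visualization = VolumeVisualizationRamp()
#   visualization.setImageData(imageData)    # picks up the scalar range
#   visualization.updateTransferFunction()   # builds the ramp color/opacity maps
#   volumeProperty = visualization.volProp   # vtkVolumeProperty, ready for a volume mapper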
|
berendkleinhaneveld/Registrationshop
|
ui/visualizations/VolumeVisualizationRamp.py
|
Python
|
mit
| 2,500
|
[
"VTK"
] |
544c81bd10e39fee3c44e9bb6e6e2b06da1e0ef31ec92304037e1659eb6e43d3
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .core import UnitedStates
# FIXME: According to wikipedia, Kansas observes all federal holidays except
# Columbus Day and Washington's Birthday.
# Unfortunately, other sources mention XMas Eve for 2018, but not for other
# years.
# I'm a bit sad here...
#
# Sources to lookup, if you want to help:
# * http://www.admin.ks.gov/docs/default-source/ops/holidays/holidays2018.pdf
# * http://www.kansas.gov/employee/documents/2017calendar.pdf
# * https://publicholidays.us/kansas/2018-dates/
# * https://en.wikipedia.org/wiki/Public_holidays_in_the_United_States#Kansas
# * https://publicholidays.us/kansas/2018-dates/
class Kansas(UnitedStates):
"""Kansas"""
include_federal_presidents_day = False
include_columbus_day = False
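# A minimal usage sketch, assuming the standard workalendar calendar API
# (`holidays(year)`) inherited from UnitedStates; nothing below is executed
# by this module.
#
#   cal = Kansas()
#   for day, label in cal.holidays(2018):
#       print(day, label)   # federal holidays minus Presidents Day and Columbus Day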
|
sayoun/workalendar
|
workalendar/usa/kansas.py
|
Python
|
mit
| 882
|
[
"COLUMBUS"
] |
ddbc456cafd778b8a37f79bf8d72084caf835b1f322f5675573946f0617daf74
|
# Prepare a cut-down version of the World Ocean Atlas 2018 salinity
# data to use with the AddSalinityPreprocessor.
# Input files are 0.25° seasonal files for the years 2005-2017,
# available from https://www.nodc.noaa.gov/cgi-bin/OC5/woa18/woa18.pl
# Files are:
#
# woa18_A5B7_s13_04.nc - Winter (DJF) = Season 1
# woa18_A5B7_s14_04.nc - Spring (MAM) = Season 2
# woa18_A5B7_s15_04.nc - Summer (JJA) = Season 3
# woa18_A5B7_s16_04.nc - Autumn (SON) = Season 4
#
#
# Output is a single netCDF file, containing the surface data for the full grid
# and four time steps.
import os
import netCDF4
WINTER_FILE = "woa18_A5B7_s13_04.nc"
SPRING_FILE = "woa18_A5B7_s14_04.nc"
SUMMER_FILE = "woa18_A5B7_s15_04.nc"
AUTUMN_FILE = "woa18_A5B7_s16_04.nc"
IN_VAR = "s_an"
OUTPUT_FILE = "woa18_seasonal_surface_salinity.nc"
def main():
if not init_check():
print("Initialisation check failed.")
exit()
init_output_file()
add_season(WINTER_FILE, 0)
add_season(SPRING_FILE, 1)
add_season(SUMMER_FILE, 2)
add_season(AUTUMN_FILE, 3)
# Initialisation check
def init_check():
check_result = True
if not file_exists(WINTER_FILE):
check_result = False
if not file_exists(SPRING_FILE):
check_result = False
if not file_exists(SUMMER_FILE):
check_result = False
    if not file_exists(AUTUMN_FILE):
check_result = False
return check_result
# See if a file exists
def file_exists(file):
exists = True
if not os.path.isfile(file):
print("Missing file %s" % file)
exists = False
return exists
def init_output_file():
# Get spatial dimensions from input file
nc = netCDF4.Dataset(WINTER_FILE, mode="r")
lons = nc.variables["lon"][:]
lats = nc.variables["lat"][:]
nc.close()
nc = netCDF4.Dataset(OUTPUT_FILE, format="NETCDF4_CLASSIC", mode="w")
nc.createDimension("lon", len(lons))
lon_var = nc.createVariable("lon", "f", "lon", fill_value=-999)
lon_var.units = "degrees_east"
nc.createDimension("lat", len(lats))
lat_var = nc.createVariable("lat", "f", "lat", fill_value=-999)
lat_var.units = "degrees_north"
nc.createDimension("time", 4)
time_var = nc.createVariable("time", "i", "time", fill_value=-1)
time_var.units = "season"
time_var.long_name = "season"
nc.createVariable("salinity", "d", ("time", "lat", "lon"), fill_value=-999)
lon_var[:] = lons
lat_var[:] = lats
time_var[:] = [1, 2, 3, 4]
nc.close()
def add_season(season_file, season):
nc = netCDF4.Dataset(season_file, mode="r")
values = nc.variables[IN_VAR][0, 0, :, :]
nc.close()
nc = netCDF4.Dataset(OUTPUT_FILE, mode="a")
nc.variables["salinity"][season, :, :] = values
nc.close()
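# Optional verification sketch (illustrative, not called by main() below):
# re-open the file written by this script and report the shape and value
# range of the surface salinity field. It assumes OUTPUT_FILE already exists
# in the working directory.
def describe_output(path=OUTPUT_FILE):
    nc = netCDF4.Dataset(path, mode="r")
    sal = nc.variables["salinity"][:]
    print("salinity shape (time, lat, lon): %s" % str(sal.shape))
    print("value range: %f to %f" % (sal.min(), sal.max()))
    nc.close()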
if __name__ == '__main__':
main()
|
BjerknesClimateDataCentre/QuinCe
|
external_scripts/NRT/salinity_data/prepare_salinity.py
|
Python
|
gpl-3.0
| 2,813
|
[
"NetCDF"
] |
5135effb96c9ff74bcf8d6c448fea2f303af7ed05cc613542a8f2668f880f2a3
|
from __future__ import print_function
import os
import sys
import numpy as np
from distutils.core import setup, Extension, setup_keywords
from distutils.sysconfig import get_config_var
sys.path += [ "." ]
from config import get_system_config
data_files = []
# data files & folders (folders nest only two levels down, not three anymore)
dirs = ['param','examples']
for dir in dirs:
for item in os.listdir(dir):
fullitem = os.path.join(dir,item)
if os.path.isfile(fullitem):
data_files.append((dir,[fullitem]))
elif os.path.isdir(fullitem) and '.svn' not in fullitem:
for item2 in os.listdir(fullitem):
fullitem2 = os.path.join(fullitem,item2)
if os.path.isfile(fullitem2):
data_files.append((fullitem,[fullitem2]))
data_files.append(('.',['hotbit/hotbit']))
###
inc_dirs = [ ]
lib_dirs = [ ]
libs = [ ]
extra_link = [ ]
extra_compile = [ ]
msgs = [ ]
get_system_config(inc_dirs, libs, lib_dirs,
extra_link, extra_compile,
msgs)
# check for user provided customizations
customize = None
for i, arg in enumerate(sys.argv):
if arg.startswith('--customize'):
arg = sys.argv.pop(i)
try:
customize = arg.split('=')[1]
except IndexError:
customize = 'customize.py'
break
if customize is not None and os.path.isfile(customize):
exec(open(customize).read())
msgs.append('* Using custom system configuration from %s' %customize)
elif customize is not None:
msgs.append('* No custom system configuration in %s' %customize)
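# An illustrative customize.py (the variable names are the ones consumed by
# the exec() call above; the paths and flags are placeholders, adjust them
# for your system):
#
#   inc_dirs += ['/opt/openblas/include']
#   lib_dirs += ['/opt/openblas/lib']
#   libs += ['openblas']
#   extra_compile += ['-O3']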
# this is probably a silly way of doing this:
version = '0.1'
revision=os.popen('svnversion .').readline()[:-1]
with open('./hotbit/version.py', 'w') as f:
    f.write('hotbit_version = "%s (svn=%s)"\n' % (version, revision))
s=setup(
name = "hotbit",
url = "https://github.com/pekkosk/hotbit",
description = "Density-functional tight-binding calculator for ASE",
author_email = "pekka.koskinen@iki.fi",
version = version,
packages = [
"box",
"hotbit",
"hotbit.analysis",
"hotbit.containers",
"hotbit.coulomb",
"hotbit.io",
"hotbit.parametrization",
"hotbit.test"
],
ext_modules = [
Extension(
"_hotbit",
[ "hotbit/c/_hotbit.c",
"hotbit/c/geig.c",
"hotbit/c/slako.c",
"hotbit/c/spherical.c",
"hotbit/c/multipole.c" ],
include_dirs = inc_dirs,
libraries = libs,
library_dirs = lib_dirs,
extra_compile_args = extra_compile,
extra_link_args = extra_link
)
],
data_files = data_files
)
if 'install' in s.command_options:
try:
home = s.command_options['install']['home'][1]
except KeyError:
# installation without parameter --home
home = os.getenv('HOME')
msgs.append('* No installation directory specified, hotbit installed directly to %s' % home)
#hb = os.path.expanduser('%s/hotbit' %home)
#os.chmod(hb, 0o755)
for msg in msgs:
print(msg)
|
pekkosk/hotbit
|
setup.py
|
Python
|
gpl-2.0
| 3,232
|
[
"ASE"
] |
c49b1bbd15cc02d242fe13e368a0cc5d1302c673c646ad5b3cded0078d824b14
|
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf 2.0 upgrader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import os
import tempfile
from absl.testing import parameterized
import six
import tensorflow.compat.v1 as tf
# OSS TF V2 import placeholder.
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import tf_upgrade_v2
def get_symbol_for_name(root, name):
name_parts = six.ensure_str(name).split(".")
symbol = root
# Iterate starting with second item since 1st item is "tf.".
for part in name_parts[1:]:
symbol = getattr(symbol, part)
return symbol
def get_args(symbol):
if hasattr(inspect, "signature"):
signature = inspect.signature(symbol)
# Ignore *args and **kwargs for now.
return [param.name for param in signature.parameters.values()
if param.kind == param.POSITIONAL_OR_KEYWORD]
return tf_inspect.getargspec(symbol)[0]
def get_func_and_args_from_str(call_str):
"""Parse call string to get function and argument names.
Args:
call_str: Call string must be in the form:
`tf.foo(arg1=val1, arg2=val2, ...)`.
Returns:
(function_name, list of arg names) tuple.
"""
open_paren_index = six.ensure_str(call_str).find("(")
close_paren_index = call_str.rfind(")")
function_name = call_str[:six.ensure_str(call_str).find("(")]
args = six.ensure_str(call_str[open_paren_index +
1:close_paren_index]).split(",")
args = [six.ensure_str(arg).split("=")[0].strip() for arg in args]
args = [arg for arg in args if arg] # filter out empty strings
return function_name, args
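# For example (illustrative only, not an additional test case):
#   get_func_and_args_from_str("tf.nn.dropout(x, keep_prob=0.5, name='d')")
#   returns ("tf.nn.dropout", ["x", "keep_prob", "name"]).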
class TestUpgrade(test_util.TensorFlowTestCase, parameterized.TestCase):
"""Test various APIs that have been changed in 2.0.
We also test whether a converted file is executable. test_file_v1_10.py
aims to exhaustively test that API changes are convertible and actually
work when run with current TensorFlow.
"""
@classmethod
def setUpClass(cls):
super(TestUpgrade, cls).setUpClass()
cls.v2_symbols = {}
cls.v1_symbols = {}
if hasattr(tf.compat, "v2"):
def symbol_collector(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names_v2 = tf_export.get_v2_names(attr)
for name in api_names_v2:
cls.v2_symbols["tf." + six.ensure_str(name)] = attr
visitor = public_api.PublicAPIVisitor(symbol_collector)
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v2, visitor)
if hasattr(tf.compat, "v1"):
def symbol_collector_v1(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names_v1 = tf_export.get_v1_names(attr)
for name in api_names_v1:
cls.v1_symbols["tf." + six.ensure_str(name)] = attr
visitor = public_api.PublicAPIVisitor(symbol_collector_v1)
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def _upgrade(self,
old_file_text,
import_rename=False,
upgrade_compat_v1_import=False):
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
upgrader = ast_edits.ASTCodeUpgrader(
tf_upgrade_v2.TFAPIChangeSpec(
import_rename, upgrade_compat_v1_import=upgrade_compat_v1_import))
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
return count, report, errors, out_file.getvalue()
def _upgrade_multiple(self, old_file_texts):
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
results = []
for old_file_text in old_file_texts:
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
results.append([count, report, errors, out_file.getvalue()])
return results
def testParseError(self):
_, report, unused_errors, unused_new_text = self._upgrade(
"import tensorflow as tf\na + \n")
self.assertNotEqual(six.ensure_str(report).find("Failed to parse"), -1)
def testReport(self):
text = "tf.angle(a)\n"
_, report, unused_errors, unused_new_text = self._upgrade(text)
    # This is not a complete test, but it is a sanity check that the report
    # contains some information.
self.assertTrue(
six.ensure_str(report).find("Renamed function `tf.angle` to "
"`tf.math.angle`"))
def testRename(self):
text = "tf.conj(a)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.math.conj(a)\n")
text = "tf.rsqrt(tf.log_sigmoid(3.8))\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.math.rsqrt(tf.math.log_sigmoid(3.8))\n")
def testAllAPI(self):
if not hasattr(tf.compat, "v2"):
return
# Converts all symbols in the v1 namespace to the v2 namespace, raising
# an error if the target of the conversion is not in the v2 namespace.
# Please regenerate the renames file or edit any manual renames if this
# test fails.
def conversion_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names = tf_export.get_v1_names(attr)
for name in api_names:
_, _, _, text = self._upgrade("tf." + six.ensure_str(name))
if (text and
not text.startswith("tf.compat.v1") and
not text.startswith("tf.compat.v2") and
text not in self.v2_symbols and
# Builds currently install old version of estimator that doesn't
# have some 2.0 symbols.
not text.startswith("tf.estimator")):
self.assertFalse(
True, "Symbol %s generated from %s not in v2 API" % (
text, name))
visitor = public_api.PublicAPIVisitor(conversion_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def testAllAPIV1(self):
collect = True
v1_symbols = set([])
# Converts all symbols in the v1 namespace to the v2 namespace, raising
# an error if the target of the conversion is not in the v1 namespace.
def conversion_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names = tf_export.get_v1_names(attr)
for name in api_names:
if collect:
v1_symbols.add("tf." + six.ensure_str(name))
else:
_, _, _, text = self._upgrade("tf." + six.ensure_str(name))
if (text and
not text.startswith("tf.compat.v1") and
not text.startswith("tf.compat.v2") and
not text.startswith("tf.estimator") and
text not in v1_symbols):
self.assertFalse(
True, "Symbol %s generated from %s not in v1 API" % (
text, name))
visitor = public_api.PublicAPIVisitor(conversion_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
collect = False
traverse.traverse(tf.compat.v1, visitor)
def testV1KeywordArgNames(self):
all_keyword_renames = (
tf_upgrade_v2.TFAPIChangeSpec().function_keyword_renames)
# Visitor that verifies V1 argument names.
def arg_test_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
names_v1 = tf_export.get_v1_names(attr)
for name in names_v1:
name = "tf.%s" % name
if name not in all_keyword_renames:
continue
arg_names_v1 = tf_inspect.getargspec(attr)[0]
keyword_renames = all_keyword_renames[name]
self.assertEqual(type(keyword_renames), dict)
# Assert that v1 function has valid v1 argument names.
for from_name, _ in keyword_renames.items():
self.assertIn(
from_name, arg_names_v1,
"%s not found in %s arguments: %s" %
(from_name, name, str(arg_names_v1)))
visitor = public_api.PublicAPIVisitor(arg_test_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def testV2KeywordArgNames(self):
# This test converts a call of the form:
# tf.foo(arg1=0, arg2=1, ...)
# to 2.0. Then, checks that converted function has valid argument names.
if not hasattr(tf.compat, "v2"):
return
v2_arg_exceptions = {
"verify_shape_is_now_always_true",
# These arguments should not be used, they just specify
# that a function takes named arguments.
"keyword_required",
"_sentinel",
}
v1_name_exceptions = {
"tf.print", # requires print_function import
}
function_warnings = (
tf_upgrade_v2.TFAPIChangeSpec().function_warnings)
function_transformers = (
tf_upgrade_v2.TFAPIChangeSpec().function_transformers)
keyword_renames = (
tf_upgrade_v2.TFAPIChangeSpec().function_keyword_renames)
# Visitor that converts to V2 and checks V2 argument names.
def conversion_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
if not tf_inspect.isfunction(attr):
continue
names_v1 = tf_export.get_v1_names(attr)
arg_names_v1 = get_args(attr)
for name in names_v1:
tf_name = "tf.%s" % name
if tf_name in function_warnings or tf_name in function_transformers:
continue # These require manual change
if tf_name in v1_name_exceptions:
continue
# Assert that arg names after converting to v2 are present in
# v2 function.
# 1. First, create an input of the form:
# tf.foo(arg1=val1, arg2=val2, ...)
args = ",".join(
["%s=%d" % (from_name, from_index)
for from_index, from_name in enumerate(arg_names_v1)])
text_input = "%s(%s)" % (tf_name, args)
# 2. Convert the input to V2.
_, _, _, text = self._upgrade(text_input)
new_function_name, new_args = get_func_and_args_from_str(text)
if new_function_name == "tf.compat.v1.%s" % name:
if tf_name in keyword_renames:
# If we rename arguments, new function must be available in 2.0.
# We should not be using compat.v1 in this case.
self.fail(
"Function '%s' is not in 2.0 when converting\n%s\nto\n%s" %
(new_function_name, text_input, text))
continue
if new_function_name.startswith("tf.compat.v2"):
self.assertIn(new_function_name.replace("tf.compat.v2.", "tf."),
self.v2_symbols)
continue
# 3. Verify V2 function and arguments.
args_v2 = get_args(self.v2_symbols[new_function_name])
args_v2.extend(v2_arg_exceptions)
for new_arg in new_args:
self.assertIn(
new_arg, args_v2,
"Invalid argument '%s' in 2.0 when converting\n%s\nto\n%s.\n"
"Supported arguments: %s" % (
new_arg, text_input, text, str(args_v2)))
# 4. Verify that the argument exists in v1 as well.
if new_function_name in set(["tf.nn.ctc_loss",
"tf.saved_model.save"]):
continue
args_v1 = get_args(self.v1_symbols[new_function_name])
args_v1.extend(v2_arg_exceptions)
for new_arg in new_args:
self.assertIn(
new_arg, args_v1,
"Invalid argument '%s' in 1.0 when converting\n%s\nto\n%s.\n"
"Supported arguments: %s" % (
new_arg, text_input, text, str(args_v1)))
visitor = public_api.PublicAPIVisitor(conversion_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def testPositionsMatchArgGiven(self):
full_dict = tf_upgrade_v2.TFAPIChangeSpec().function_arg_warnings
method_names = list(full_dict.keys())
for method_name in method_names:
args = list(full_dict[method_name].keys())
if "contrib" in method_name:
# Skip descending and fetching contrib methods during test. These are
# not available in the repo anymore.
continue
elif six.ensure_str(method_name).startswith("*."):
# special case for optimizer methods
method = six.ensure_str(method_name).replace("*", "tf.train.Optimizer")
else:
method = method_name
method = get_symbol_for_name(tf, method)
arg_spec = tf_inspect.getfullargspec(method)
for (arg, pos) in args:
# to deal with the self argument on methods on objects
if six.ensure_str(method_name).startswith("*."):
pos += 1
self.assertEqual(arg_spec[0][pos], arg)
def testReorderFileNeedsUpdate(self):
reordered_function_names = (
tf_upgrade_v2.TFAPIChangeSpec().reordered_function_names)
function_reorders = (
tf_upgrade_v2.TFAPIChangeSpec().function_reorders)
manual_function_reorders = (
tf_upgrade_v2.TFAPIChangeSpec().manual_function_reorders)
added_names_message = """Some function names in
self.reordered_function_names are not in reorders_v2.py.
Please run the following commands to update reorders_v2.py:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
"""
removed_names_message = """%s in self.reorders_v2 does not match
any name in self.reordered_function_names.
Please run the following commands to update reorders_v2.py:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
"""
self.assertTrue(
reordered_function_names.issubset(function_reorders),
added_names_message)
# function_reorders should contain reordered_function_names
# and their TensorFlow V1 aliases.
for name in function_reorders:
if name in manual_function_reorders:
continue
# get other names for this function
attr = get_symbol_for_name(tf.compat.v1, name)
_, attr = tf_decorator.unwrap(attr)
v1_names = tf_export.get_v1_names(attr)
self.assertTrue(v1_names)
v1_names = ["tf.%s" % n for n in v1_names]
# check if any other name is in
self.assertTrue(
any(n in reordered_function_names for n in v1_names),
removed_names_message % name)
def testRenameConstant(self):
text = "tf.MONOLITHIC_BUILD\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.sysconfig.MONOLITHIC_BUILD\n")
text = "some_call(tf.MONOLITHIC_BUILD)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "some_call(tf.sysconfig.MONOLITHIC_BUILD)\n")
def testRenameArgs(self):
text = ("tf.nn.pool(input_a, window_shape_a, pooling_type_a, padding_a, "
"dilation_rate_a, strides_a, name_a, data_format_a)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text,
("tf.nn.pool(input=input_a, window_shape=window_shape_a,"
" pooling_type=pooling_type_a, padding=padding_a, "
"dilations=dilation_rate_a, strides=strides_a, "
"name=name_a, data_format=data_format_a)\n"))
def testReorder(self):
text = "tf.boolean_mask(a, b, c, d)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text,
"tf.boolean_mask(tensor=a, mask=b, name=c, axis=d)\n")
def testLearningRateDecay(self):
for decay in ["tf.train.exponential_decay",
"tf.train.polynomial_decay", "tf.train.natural_exp_decay",
"tf.train.inverse_time_decay", "tf.train.cosine_decay",
"tf.train.cosine_decay_restarts",
"tf.train.linear_cosine_decay",
"tf.train.noisy_linear_cosine_decay",
"tf.train.piecewise_constant_decay",
]:
text = "%s(a, b)\n" % decay
_, report, unused_errors, _ = self._upgrade(text)
self.assertIn("switch to the schedules in "
"`tf.keras.optimizers.schedules`", report)
def verify_compat_v1_rename_correctness(self, values, ns_prefix=""):
if ns_prefix:
ns_prefix += "."
for v in values:
text = "tf." + ns_prefix + v + "(a, b)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual("tf.compat.v1." + ns_prefix + v + "(a, b)", new_text)
def testInitializers(self):
initializers = [
"zeros",
"ones",
"constant",
"random_uniform",
"random_normal",
"truncated_normal",
"variance_scaling",
"orthogonal",
"glorot_uniform",
"glorot_normal",
"identity",
"lecun_normal",
"lecun_uniform",
"he_normal",
"he_uniform",
]
self.verify_compat_v1_rename_correctness(
initializers, ns_prefix="initializers")
initializers = [
"zeros_initializer",
"ones_initializer",
"constant_initializer",
"random_uniform_initializer",
"random_normal_initializer",
"truncated_normal_initializer",
"variance_scaling_initializer",
"orthogonal_initializer",
"glorot_uniform_initializer",
"glorot_normal_initializer",
]
self.verify_compat_v1_rename_correctness(initializers)
initializers = [
"zeros",
"ones",
"Ones",
"Zeros",
"constant",
"Constant",
"VarianceScaling",
"Orthogonal",
"orthogonal",
"Identity",
"identity",
"glorot_uniform",
"glorot_normal",
"lecun_normal",
"lecun_uniform",
"he_normal",
"he_uniform",
"TruncatedNormal",
"truncated_normal",
"RandomUniform",
"uniform",
"random_uniform",
"RandomNormal",
"normal",
"random_normal",
]
self.verify_compat_v1_rename_correctness(
initializers, ns_prefix="keras.initializers")
def testContribXavierInitializer(self):
for contrib_alias in ["tf.contrib.", "contrib_"]:
text = contrib_alias + "layers.xavier_initializer()\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=\"uniform\")\n",
)
text = "slim.xavier_initializer(True or False)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if True or False else "
"\"truncated_normal\"))\n",
)
text = "slim.xavier_initializer(uniform=(True or False))\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if True or False else "
"\"truncated_normal\"))\n",
)
text = contrib_alias + "layers.xavier_initializer_conv2d(False, 12)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if False else \"truncated_normal\"), "
"seed=12)\n",
)
text = (contrib_alias + "layers.xavier_initializer_conv2d("
"False, 12, tf.float32)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if False else \"truncated_normal\"), "
"seed=12, "
"dtype=tf.float32)\n",
)
text = (contrib_alias + "layers.xavier_initializer("
"False, 12, dtypes=tf.float32)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if False else \"truncated_normal\"), "
"seed=12, "
"dtypes=tf.float32)\n",
)
def testVarianceScalingInitializer(self):
text = ("tf.contrib.layers.variance_scaling_initializer("
"mode=(\"FAN\" + \"_AVG\"))\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=2.0, "
"mode=(\"FAN\" + \"_AVG\").lower())\n",
)
text = ("slim.variance_scaling_initializer("
"uniform=(True or False), mode=(\"FAN\" + \"_AVG\"))\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=2.0, "
"distribution=(\"uniform\" if True or False else \"truncated_normal\"),"
" mode=(\"FAN\" + \"_AVG\").lower())\n",
)
text = "tf.contrib.layers.variance_scaling_initializer(factor=1.0)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0)\n",
)
text = ("tf.contrib.layers.variance_scaling_initializer("
"12.0, \"FAN_AVG\", True, dtypes=tf.float32)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(12.0, "
"(\"FAN_AVG\").lower(), "
"(\"uniform\" if True else \"truncated_normal\"), "
"dtypes=tf.float32)\n",
)
def testMetrics(self):
metrics = [
"accuracy",
"auc",
"average_precision_at_k",
"false_negatives",
"false_negatives_at_thresholds",
"false_positives",
"false_positives_at_thresholds",
"mean",
"mean_absolute_error",
"mean_cosine_distance",
"mean_iou",
"mean_per_class_accuracy",
"mean_relative_error",
"mean_squared_error",
"mean_tensor",
"percentage_below",
"precision",
"precision_at_k",
"precision_at_thresholds",
"precision_at_top_k",
"recall",
"recall_at_k",
"recall_at_thresholds",
"recall_at_top_k",
"root_mean_squared_error",
"sensitivity_at_specificity",
"sparse_average_precision_at_k",
"sparse_precision_at_k",
"specificity_at_sensitivity",
"true_negatives",
"true_negatives_at_thresholds",
"true_positives",
"true_positives_at_thresholds",
]
for m in metrics:
text = "tf.metrics." + m + "(a, b)"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("tf.compat.v1.metrics." + m + "(a, b)", new_text)
self.assertIn(
"tf.metrics have been replaced with object oriented versions", report)
def testLosses(self):
losses = [
"absolute_difference",
"add_loss",
"compute_weighted_loss",
"cosine_distance",
"get_losses",
"get_regularization_loss",
"get_regularization_losses",
"get_total_loss",
"hinge_loss",
"huber_loss",
"log_loss",
"mean_pairwise_squared_error",
"mean_squared_error",
"sigmoid_cross_entropy",
"softmax_cross_entropy",
"sparse_softmax_cross_entropy",
]
for l in losses:
text = "tf.losses." + l + "(a, b)"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("tf.compat.v1.losses." + l + "(a, b)", new_text)
self.assertIn(
"tf.losses have been replaced with object oriented versions", report)
def testEstimatorLossReductionChange(self):
classes = [
"LinearClassifier", "LinearRegressor", "DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor", "DNNRegressor", "DNNClassifier",
"BaselineClassifier", "BaselineRegressor"
]
for c in classes:
ns = "tf.estimator." + c
text = ns + "()"
expected_text = ns + "(loss_reduction=tf.keras.losses.Reduction.SUM)"
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = ns + "(loss_reduction=TEST)"
expected_text = ns + "(loss_reduction=TEST)"
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
text = "tf.estimator.BaselineClassifier(m, c, w, v, o, c, lr)"
expected_text = (
"tf.compat.v1.estimator.BaselineClassifier("
"model_dir=m, n_classes=c, weight_column=w, label_vocabulary=v, "
"optimizer=o, config=c, loss_reduction=lr)")
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.estimator.BaselineClassifier(model_dir=model_dir)"
expected_text = ("tf.estimator.BaselineClassifier(" +
"model_dir=model_dir, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testBaseEstimatorPartitioner(self):
classes = ["LinearEstimator", "DNNLinearCombinedEstimator", "DNNEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = "(input_layer_partitioner=TEST)"
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testCannedEstimatorPartitioner(self):
classes = [
"LinearClassifier", "LinearRegressor", "DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor", "DNNRegressor", "DNNClassifier"
]
for c in classes:
ns = "tf.estimator." + c
suffix = "(input_layer_partitioner=TEST)"
text = ns + suffix
suffix = ("(input_layer_partitioner=TEST, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testBaseEstimatorOptimizer(self):
classes = ["BaselineEstimator", "LinearEstimator", "DNNEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = "(optimizer=TEST)"
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDNNLinearCombinedEstimatorOptimizer(self):
classes = ["DNNLinearCombinedEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = "(dnn_optimizer=TEST, linear_optimizer=Test)"
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testCannedEstimatorOptimizer(self):
classes = [
"BaselineClassifier", "BaselineRegressor", "LinearClassifier",
"LinearRegressor", "DNNRegressor", "DNNClassifier"
]
for c in classes:
ns = "tf.estimator." + c
suffix = "(optimizer=TEST)"
text = ns + suffix
suffix = ("(optimizer=TEST, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDNNLinearCombinedOptimizer(self):
classes = [
"DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor",
]
for c in classes:
ns = "tf.estimator." + c
suffix = "(dnn_optimizer=TEST, linear_optimizer=Test)"
text = ns + suffix
suffix = ("(dnn_optimizer=TEST, linear_optimizer=Test, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testBaseEstimatorPartitionerAndOptimizer(self):
classes = ["LinearEstimator", "DNNEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = "(input_layer_partitioner=TEST, optimizer=TEST)"
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDNNLinearCombinedEstimatorPartitionerAndOptimizer(self):
classes = ["DNNLinearCombinedEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = ("(input_layer_partitioner=TEST, dnn_optimizer=TEST, "
"linear_optimizer=TEST)")
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testCannedEstimatorPartitionerAndOptimizer(self):
classes = [
"LinearClassifier", "LinearRegressor", "DNNRegressor", "DNNClassifier"
]
for c in classes:
ns = "tf.estimator." + c
suffix = "(input_layer_partitioner=TEST, optimizer=TEST)"
text = ns + suffix
suffix = ("(input_layer_partitioner=TEST, optimizer=TEST, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDNNLinearCombinedPartitionerAndOptimizer(self):
classes = [
"DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor",
]
for c in classes:
ns = "tf.estimator." + c
suffix = ("(input_layer_partitioner=TEST, dnn_optimizer=TEST, "
"linear_optimizer=TEST)")
text = ns + suffix
suffix = ("(input_layer_partitioner=TEST, dnn_optimizer=TEST, "
"linear_optimizer=TEST, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testExtractGlimpse(self):
text = ("tf.image.extract_glimpse(x, size, off, False, "
"False, False, name=\"foo\")\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.image.extract_glimpse(x, size, off, False, "
"False, 'uniform' if (False) else 'gaussian', name=\"foo\")\n",
)
text = ("tf.image.extract_glimpse(x, size, off, centered=False, "
"normalized=False, uniform_noise=True if uniform_noise else "
"False, name=\"foo\")\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.image.extract_glimpse(x, size, off, centered=False, "
"normalized=False, noise='uniform' if (True if uniform_noise else "
"False) else 'gaussian', name=\"foo\")\n",
)
text = ("tf.image.extract_glimpse(x,\n"
" size,\n"
" off,\n"
" centered=True,\n"
" normalized=True, # Stuff before\n"
" uniform_noise=False,\n"
" name=\"foo\")# Stuff after\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text, "tf.image.extract_glimpse(x,\n"
" size,\n"
" off,\n"
" centered=True,\n"
" normalized=True, # Stuff before\n"
" noise='uniform' if (False) else 'gaussian',\n"
" name=\"foo\")# Stuff after\n")
text = "tf.image.extract_glimpse(x)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, text)
self.assertEqual(errors, [])
def testDropout(self):
text = "tf.nn.dropout(x, keep_prob, name=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, 1 - (keep_prob), name=\"foo\")\n",
)
text = "tf.nn.dropout(x, keep_prob=.4, name=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, rate=1 - (.4), name=\"foo\")\n",
)
text = (
"tf.nn.dropout(x, # Stuff before\n"
" keep_prob=.4, # Stuff after\n"
" name=\"foo\")\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, # Stuff before\n"
" rate=1 - (.4), # Stuff after\n"
" name=\"foo\")\n",
)
text = "tf.nn.dropout(x)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, text)
self.assertIn("tf.nn.dropout called without arguments", errors[0])
def testDropoutExpr(self):
text = "tf.nn.dropout(x, 1 - func(3 + 4.), name=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, 1 - (1 - func(3 + 4.)), name=\"foo\")\n",
)
def testContribL1(self):
text = "tf.contrib.layers.l1_regularizer(scale)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l1(scale)\n",
)
self.assertNotIn("Dropping scope", unused_report)
text = "tf.contrib.layers.l1_regularizer(scale, scope)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l1(scale)\n",
)
self.assertIn("Dropping scope", unused_report)
text = (
"slim.l1_regularizer( # Stuff before\n"
" scale=.4,"
" scope=\"foo\")\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l1( # Stuff before\n"
" l=.4)\n",
)
self.assertIn("Dropping scope", unused_report)
def testContribL2(self):
text = "tf.contrib.layers.l2_regularizer(scale)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l2(0.5 * (scale))\n",
)
self.assertNotIn("Dropping scope", unused_report)
text = "tf.contrib.layers.l2_regularizer(scale, scope)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l2(0.5 * (scale))\n",
)
self.assertIn("Dropping scope", unused_report)
text = (
"slim.l2_regularizer( # Stuff before\n"
" scale=.4,"
" scope=\"foo\")\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l2( # Stuff before\n"
" l=0.5 * (.4))\n",
)
self.assertIn("Dropping scope", unused_report)
def testContribL2Expr(self):
text = "tf.contrib.layers.l2_regularizer(1 - func(3 + 4.), scope=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l2(0.5 * (1 - func(3 + 4.)))\n",
)
def testMathCountNonZeroChanges(self):
text = (
"tf.math.count_nonzero(input_tensor=input, dtype=dtype, name=name, "
"reduction_indices=axis, keep_dims=keepdims)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.math.count_nonzero(input=input, dtype=dtype, name=name, "
"axis=axis, keepdims=keepdims)\n"
)
self.assertEqual(new_text, expected_text)
def testCountNonZeroChanges(self):
text = (
"tf.count_nonzero(input_tensor=input, dtype=dtype, name=name, "
"reduction_indices=axis, keep_dims=keepdims)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.math.count_nonzero(input=input, dtype=dtype, name=name, "
"axis=axis, keepdims=keepdims)\n"
)
self.assertEqual(new_text, expected_text)
def testRandomMultinomialToRandomCategorical(self):
text = (
"tf.random.multinomial(logits, samples, seed, name, output_dtype)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.random.categorical(logits=logits, num_samples=samples, seed=seed, "
"name=name, dtype=output_dtype)\n"
)
self.assertEqual(new_text, expected_text)
text = (
"tf.multinomial(logits, samples, seed, name, output_dtype)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.random.categorical(logits=logits, num_samples=samples, seed=seed, "
"name=name, dtype=output_dtype)\n"
)
self.assertEqual(new_text, expected_text)
def testRandomPoissonConversion(self):
text1 = "tf.random_poisson(lam, shape, dtype)"
text2 = "tf.random.poisson(lam, shape, dtype)"
expected_text = "tf.random.poisson(lam=lam, shape=shape, dtype=dtype)"
_, unused_report, unused_errors, new_text1 = self._upgrade(text1)
self.assertEqual(new_text1, expected_text)
_, unused_report, unused_errors, new_text2 = self._upgrade(text2)
self.assertEqual(new_text2, expected_text)
def testConvolutionOpUpdate(self):
text = (
"tf.nn.convolution(input, filter, padding, strides, dilation_rate, "
"name, data_format)"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.nn.convolution(input=input, filters=filter, padding=padding, "
"strides=strides, dilations=dilation_rate, name=name, "
"data_format=data_format)"
)
self.assertEqual(new_text, expected_text)
def test_substr(self):
text = "tf.substr(input, pos, len, name, unit)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual("tf.strings.substr(input=input, pos=pos, len=len, "
"name=name, unit=unit)\n", new_text)
self.assertEqual(errors, [])
def testColocateGradientsWithOps(self):
text = "tf.gradients(yx=a, foo=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, [])
text = "tf.gradients(yx=a, colocate_gradients_with_ops=False)\n"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("tf.gradients(yx=a)\n", new_text)
self.assertIn("tf.gradients no longer takes", report)
text = "tf.gradients(y, x, grad_ys, name, colocate, gate)\n"
expected = ("tf.gradients(ys=y, xs=x, grad_ys=grad_ys, name=name, "
"gate_gradients=gate)\n")
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def testColocateGradientsWithOpsMinimize(self):
text = "optimizer.minimize(a, foo=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, [])
text = "optimizer.minimize(a, colocate_gradients_with_ops=False)\n"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("optimizer.minimize(a)\n", new_text)
self.assertIn("Optimizer.minimize no longer takes", report)
def testColocateGradientsWithOpsComputeGradients(self):
text = "optimizer.compute_gradients(a, foo=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, [])
text = "optimizer.compute_gradients(a, colocate_gradients_with_ops=False)\n"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("optimizer.compute_gradients(a)\n", new_text)
self.assertIn("Optimizer.compute_gradients no longer takes", report)
def testColocateGradientsWithHessians(self):
text = "tf.hessians(ys=a, xs=b, colocate_gradients_with_ops=False)\n"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("tf.hessians(ys=a, xs=b)\n", new_text)
self.assertIn("tf.hessians no longer takes", report)
def testExportSavedModelRename(self):
text = "self.est.export_savedmodel(path)"
_, report, unused_errors, unused_new_text = self._upgrade(text)
self.assertIn(
"rename the method export_savedmodel() to export_saved_model()",
report)
def testArgmin(self):
text = "tf.argmin(input, name=n, dimension=1, output_type=type)"
expected_text = "tf.argmin(input=input, name=n, axis=1, output_type=type)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.argmin(input, 0)"
expected_text = "tf.argmin(input=input, axis=0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.arg_min(input, 0)"
expected_text = "tf.argmin(input, 0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testArgmax(self):
text = "tf.argmax(input, name=n, dimension=1, output_type=type)"
expected_text = "tf.argmax(input=input, name=n, axis=1, output_type=type)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.argmax(input, 0)"
expected_text = "tf.argmax(input=input, axis=0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.arg_max(input, 0)"
expected_text = "tf.argmax(input, 0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testAutograph(self):
text = "tf.autograph.to_graph(f, True, arg_values=None, arg_types=None)"
expected_text = "tf.autograph.to_graph(f, True)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = ("tf.autograph.to_code"
"(f, False, arg_values=None, arg_types=None, indentation=' ')")
expected_text = "tf.autograph.to_code(f, False)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testEstimatorInputs(self):
text = "tf.estimator.inputs.numpy_input_fn(0)"
expected_text = "tf.compat.v1.estimator.inputs.numpy_input_fn(0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.estimator.inputs.pandas_input_fn(0)"
expected_text = "tf.compat.v1.estimator.inputs.pandas_input_fn(0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testBatchToSpace(self):
text = "tf.batch_to_space_nd(input, block_shape, crops, name)"
expected_text = "tf.batch_to_space(input, block_shape, crops, name)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.batch_to_space(input, crops, block_size, name)"
expected_text = (
"tf.batch_to_space(input=input, crops=crops, block_shape=block_size, "
"name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.manip.batch_to_space_nd(input, block_shape, crops, name)"
expected_text = "tf.batch_to_space(input, block_shape, crops, name)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testExtractImagePatches(self):
text = (
"tf.extract_image_patches(images, ksizes=ksizes, strides=strides,"
"rates=rates, padding=padding, name=name)")
expected_text = (
"tf.image.extract_patches(images, sizes=ksizes, strides=strides,"
"rates=rates, padding=padding, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testKerasSavedModel(self):
text = (
"tf.contrib.saved_model.save_keras_model(model, './saved_models')\n"
"tf.contrib.saved_model.load_keras_model(saved_model_path)\n")
expected_text = (
"tf.compat.v1.keras.experimental.export_saved_model(model, "
"'./saved_models')\ntf.compat.v1.keras.experimental."
"load_from_saved_model(saved_model_path)\n"
)
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
expected_info = "Please use model.save"
self.assertIn(expected_info, report)
def testStatelessMultinomial(self):
text = (
"tf.random.stateless_multinomial(logits, num_samples, seed, "
"output_dtype=dtype, name=name)")
expected_text = (
"tf.random.stateless_categorical(logits, num_samples, seed, "
"dtype=dtype, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSoftMaxCrossEntropyWithLogitsV2(self):
text = (
"tf.nn.softmax_cross_entropy_with_logits_v2("
"labels=labels, logits=logits, dim=2)")
expected_text = (
"tf.nn.softmax_cross_entropy_with_logits("
"labels=labels, logits=logits, axis=2)")
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertFalse(errors)
def testSoftMaxCrossEntropyWithLogits(self):
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=labels, logits=logits, dim=2)")
expected_text = (
"tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(labels), logits=logits, axis=2)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=foo(bar))")
expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo(bar)))")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testSoftMaxCrossEntropyWithLogitsDoesntNest(self):
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(labels), logits=logits, dim=2)")
expected_text = (
"tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(labels), logits=logits, axis=2)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo(bar)))")
expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo(bar)))")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=foo())")
expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo()))")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=foo().zz())")
expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo().zz()))")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testSparseMatmul(self):
text = ("tf.sparse_matmul(a, b, c, d, e, f, g)\n")
expected_text = ("tf.linalg.matmul(a=a, b=b, transpose_a=c, transpose_b=d, "
"a_is_sparse=e, b_is_sparse=f, name=g)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testWeightedMoments(self):
text = "tf.nn.weighted_moments(x, axes, freq, name, kd)"
expected_text = (
"tf.nn.weighted_moments(x=x, axes=axes, frequency_weights=freq, "
"name=name, keepdims=kd)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSparseAdd(self):
text = "tf.sparse.add(a, b, t)"
expected_text = "tf.sparse.add(a=a, b=b, threshold=t)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSparseConcat(self):
text = "tf.sparse.concat(ax, inp, name, exp, concat)"
expected_text = (
"tf.sparse.concat(axis=ax, sp_inputs=inp, name=name, "
"expand_nonconcat_dims=exp, axis=concat)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSeparableConv2D(self):
text = "tf.nn.separable_conv2d(inp, d, pt, strides, pad, rate, name, fmt)"
expected_text = (
"tf.nn.separable_conv2d(input=inp, depthwise_filter=d, "
"pointwise_filter=pt, strides=strides, padding=pad, "
"dilations=rate, name=name, data_format=fmt)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testConv2D(self):
text = (
"tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu, "
"data_format)")
expected_text = (
"tf.nn.conv2d(input=input, filters=filter, strides=strides, "
"padding=padding, data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = (
"tf.nn.conv2d(input, filter=filter, strides=strides, padding=padding, "
"use_cudnn_on_gpu=use_cudnn_on_gpu)")
expected_text = ("tf.nn.conv2d(input=input, filters=filter, "
"strides=strides, padding=padding)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testConv2DBackpropFilter(self):
text = (
"tf.nn.conv2d_backprop_filter(input, filter_sizes, out_backprop, "
"strides, padding, use_cudnn_on_gpu, data_format)")
expected_text = (
"tf.compat.v1.nn.conv2d_backprop_filter(input, filter_sizes, "
"out_backprop, strides, padding, use_cudnn_on_gpu, data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testConv2DBackpropInput(self):
text = (
"tf.nn.conv2d_backprop_input(input_sizes, filter, out_backprop, "
"strides, padding, use_cudnn_on_gpu, data_format)")
expected_text = (
"tf.nn.conv2d_transpose(output_shape=input_sizes, filters=filter, "
"input=out_backprop, strides=strides, padding=padding, "
"data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSpacetoBatch(self):
text = "tf.space_to_batch_nd(input, shape, paddings, name)"
expected_text = "tf.space_to_batch(input, shape, paddings, name)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.nn.space_to_batch(input, paddings, block_size, name)"
expected_text = (
"tf.space_to_batch(input=input, paddings=paddings, "
"block_shape=block_size, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testInTopK(self):
text = "tf.math.in_top_k(a, b, c, n)"
expected_text = (
"tf.math.in_top_k(predictions=a, targets=b, k=c, name=n)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDepthToSpace(self):
text = "tf.nn.depth_to_space(input, block_size, name, data_format)"
expected_text = (
"tf.nn.depth_to_space(input=input, block_size=block_size, "
"name=name, data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testEmbeddingLookup(self):
text = ("tf.nn.embedding_lookup(params, ids, partition_strategy, name, "
"validate_indices, max_norm)")
expected_text = ("tf.nn.embedding_lookup(params=params, ids=ids, "
"partition_strategy=partition_strategy, name=name, "
"max_norm=max_norm)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testEmbeddingLookupSparse(self):
text = ("tf.nn.embedding_lookup_sparse(params, sp_ids, sp_weights, "
"partition_strategy, name, combiner, max_norm)")
expected_text = ("tf.nn.embedding_lookup_sparse(params=params, "
"sp_ids=sp_ids, sp_weights=sp_weights, "
"partition_strategy=partition_strategy, name=name, "
"combiner=combiner, max_norm=max_norm)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testNnInTopK(self):
text = "tf.nn.in_top_k(predictions, targets, k, name)"
expected_text = ("tf.nn.in_top_k(predictions=predictions, "
"targets=targets, k=k, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSpaceToDepth(self):
text = "tf.nn.space_to_depth(input, block_size, name, data_format)"
expected_text = ("tf.nn.space_to_depth(input=input, block_size=block_size, "
"name=name, data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testPrint(self):
# tf.print() cannot be parsed unless we import print_function
text = """from __future__ import print_function
tf.print()
tf.print('abc')
"""
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, text) # Text should stay the same
def testSparseSplit(self):
text = (
"tf.sparse_split(sp_input=sp_input, num_split=num_split, axis=axis, "
"name=name)")
expected_text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, axis=axis, "
"name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = (
"tf.sparse_split(sp_input=sp_input, num_split=num_split, "
"name=name, split_dim=axis)")
expected_text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, "
"name=name, axis=axis)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, "
"name=name, split_dim=axis)")
expected_text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, "
"name=name, axis=axis)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testIterators(self):
for (text, expected) in [
("(expr + yielding(data)).make_one_shot_iterator()",
"tf.compat.v1.data.make_one_shot_iterator((expr + yielding(data)))"),
("dataset.make_one_shot_iterator()",
"tf.compat.v1.data.make_one_shot_iterator(dataset)"),
("dataset.make_one_shot_iterator(shared_name=foo)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)"),
("dataset.make_one_shot_iterator(x, y, z)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)"),
("dataset.make_initializable_iterator()",
"tf.compat.v1.data.make_initializable_iterator(dataset)"),
("ds.make_initializable_iterator(shared_name=foo)",
"tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)"),
("dataset.make_initializable_iterator(x, y, z)",
"tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)"),
("tf.data.make_one_shot_iterator(dataset)",
"tf.compat.v1.data.make_one_shot_iterator(dataset)"),
("tf.data.make_one_shot_iterator(dataset, shared_name=foo)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)"),
("tf.data.make_one_shot_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)"),
("tf.data.make_initializable_iterator(dataset)",
"tf.compat.v1.data.make_initializable_iterator(dataset)"),
("tf.data.make_initializable_iterator(ds, shared_name=foo)",
"tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)"),
("tf.data.make_initializable_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)"),
("tf.compat.v1.data.make_one_shot_iterator(dataset)",
"tf.compat.v1.data.make_one_shot_iterator(dataset)"),
("tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)"),
("tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)"),
("tf.compat.v1.data.make_initializable_iterator(dataset)",
"tf.compat.v1.data.make_initializable_iterator(dataset)"),
("tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)",
"tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)"),
("tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)")]:
_, unused_report, unused_errors, actual = self._upgrade(text)
self.assertEqual(actual, expected)
def testStructure(self):
for (text, expected) in [
("tf.data.experimental.DatasetStructure", "tf.data.DatasetSpec"),
("tf.data.experimental.OptionalStructure", "tf.OptionalSpec"),
("tf.data.experimental.RaggedTensorStructure", "tf.RaggedTensorSpec"),
("tf.data.experimental.SparseTensorStructure", "tf.SparseTensorSpec"),
("tf.data.experimental.Structure", "tf.TypeSpec"),
("tf.data.experimental.TensorArrayStructure", "tf.TensorArraySpec"),
("tf.data.experimental.TensorStructure", "tf.TensorSpec"),
]:
_, unused_report, unused_errors, actual = self._upgrade(text)
self.assertEqual(actual, expected)
def testMapAndBatch(self):
suffix = ".data.experimental.map_and_batch_with_legacy_function(args)"
text = "tf" + suffix
expected = "tf.compat.v1" + suffix
_, unused_report, unused_errors, actual = self._upgrade(text)
self.assertEqual(actual, expected)
def testCast(self):
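    # The deprecated tf.to_int32/to_float/etc. helpers were removed from the
    # 2.x API; each one should be rewritten to tf.cast with an explicit dtype
    # keyword, which the loop below verifies for every dtype alias.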
for (name, dtype) in [("int32", "int32"),
("int64", "int64"),
("float", "float32"),
("double", "float64"),
("complex64", "complex64"),
("complex128", "complex128"),
("bfloat16", "bfloat16")]:
text = "tf.to_%s(x, name='test')" % name
expected_text = "tf.cast(x, name='test', dtype=tf.%s)" % dtype
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testCastPositionalSecondArgument(self):
for (name, dtype) in [("int32", "int32"),
("int64", "int64"),
("float", "float32"),
("double", "float64"),
("complex64", "complex64"),
("complex128", "complex128"),
("bfloat16", "bfloat16")]:
text = "tf.to_%s(x, 'test')" % name
expected_text = "tf.cast(x, name='test', dtype=tf.%s)" % dtype
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testImageResize(self):
for method in ["bilinear", "area", "bicubic", "nearest_neighbor"]:
text = "tf.image.resize_%s(i, s)" % method
expected_text = ("tf.image.resize(i, s, "
"method=tf.image.ResizeMethod.%s)" % method.upper())
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testImageResizeExtraPositionalArgs(self):
for method in ["bilinear", "area", "bicubic", "nearest_neighbor"]:
text = "tf.image.resize_%s(i, s, a, p)" % method
expected_text = [
"tf.image.resize(i, s, ", "preserve_aspect_ratio=p, ",
"method=tf.image.ResizeMethod.%s)" % method.upper()
]
_, unused_report, unused_errors, new_text = self._upgrade(text)
for s in expected_text:
self.assertIn(s, new_text)
def testCond(self):
text = "tf.cond(a, b, c, True)"
expected_text = "tf.cond(pred=a, true_fn=b, false_fn=c)"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("tf.cond", errors[0])
self.assertIn("requires manual check", errors[0])
def testParens(self):
text = """
def _log_prob(self, x):
return tf.reduce_logsumexp(
(self.mixture_distribution.logits + self.distribution.log_prob(
x[..., tf.newaxis])),
axis=-1)"""
expected_text = """
def _log_prob(self, x):
return tf.reduce_logsumexp(
input_tensor=(self.mixture_distribution.logits + self.distribution.log_prob(
x[..., tf.newaxis])),
axis=-1)"""
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testAssertStatements(self):
for name in ["assert_greater", "assert_equal", "assert_none_equal",
"assert_less", "assert_negative", "assert_positive",
"assert_non_negative", "assert_non_positive", "assert_near",
"assert_less", "assert_less_equal", "assert_greater",
"assert_greater_equal", "assert_integer", "assert_type",
"assert_scalar"]:
text = "tf.%s(a)" % name
expected_text = "tf.compat.v1.%s(a)" % name
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("%s has been" % name, report)
text = "tf.debugging.%s(a)" % name
expected_text = "tf.compat.v1.debugging.%s(a)" % name
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("%s has been" % name, report)
def testAssertRankStatements(self):
for name in ["assert_rank", "assert_rank_at_least", "assert_rank_in"]:
text = "tf.%s(a)" % name
expected_text = "tf.compat.v1.%s(a)" % name
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("%s has been" % name, report)
text = "tf.debugging.%s(a)" % name
expected_text = "tf.compat.v1.debugging.%s(a)" % name
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("%s has been" % name, report)
def test_assert_equal_graph_def(self):
text = ("tf.test.assert_equal_graph_def(a, b, checkpoint_v2=x, "
"hash_table_shared_name=y)")
expected = "tf.test.assert_equal_graph_def(actual=a, expected=b)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_is_tensor_upgrade(self):
text = "tf.contrib.framework.is_tensor(x)"
expected = "tf.is_tensor(x)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_is_tensor_direct_import_upgrade(self):
text = "contrib_framework.is_tensor(x)"
expected = "tf.is_tensor(x)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_CriticalSection_upgrade(self):
text = "tf.contrib.framework.CriticalSection(shared_name='blah')"
expected = "tf.CriticalSection(shared_name='blah')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_sample_distorted_bounding_box(self):
# pylint: disable=line-too-long
text = "tf.image.sample_distorted_bounding_box(a, b, c, d, e, f, g, h, i, j)"
expected = "tf.image.sample_distorted_bounding_box(image_size=a, bounding_boxes=b, seed=c, min_object_covered=e, aspect_ratio_range=f, area_range=g, max_attempts=h, use_image_if_no_bounding_boxes=i, name=j)"
# pylint: enable=line-too-long
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_initialize(self):
text = "tf.contrib.summary.initialize"
expected = "tf.compat.v1.summary.initialize"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_framework_argsort(self):
text = "tf.contrib.framework.argsort"
expected = "tf.argsort"
# pylint: enable=line-too-long
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_flags_bare(self):
_, _, errors, _ = self._upgrade("tf.flags")
self.assertIn("tf.flags and tf.app.flags have been removed", errors[0])
def test_flags_flags(self):
_, _, errors, _ = self._upgrade("tf.flags.FLAGS")
self.assertIn("tf.flags and tf.app.flags have been removed", errors[0])
def test_contrib_estimator_head_deprecation(self):
for contrib_alias in ["tf.contrib.", "contrib_"]:
api_symbols = ["binary_classification_head", "logistic_regression_head",
"multi_class_head", "multi_head", "multi_label_head",
"poisson_regression_head", "regression_head"]
for symbol in api_symbols:
text = contrib_alias + "estimator." + symbol
_, report, _, _ = self._upgrade(text)
self.assertIn("`tf.contrib.estimator.*_head` has been deprecated",
report)
def test_contrib_layers_layer_norm_deprecation(self):
for contrib_alias in ["tf.contrib.", "contrib_"]:
_, report, _, _ = self._upgrade(contrib_alias + "layers.layer_norm")
self.assertIn(
"`tf.contrib.layers.layer_norm` has been deprecated", report)
def test_contrib_rnn_deprecation(self):
_, report, _, _ = self._upgrade("tf.contrib.rnn")
self.assertIn("tf.contrib.rnn.* has been deprecated", report)
def test_contrib_cudnn_rnn_deprecation(self):
_, report, _, _ = self._upgrade("tf.contrib.cudnn_rnn")
self.assertIn("tf.contrib.cudnn_rnn.* has been deprecated", report)
def test_max_pool_2d(self):
text = "tf.nn.max_pool(value=4)"
expected_text = "tf.nn.max_pool2d(input=4)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_estimator_early_stopping(self):
for contrib_alias in ["tf.contrib.", "contrib_"]:
api_symbols = [
"make_early_stopping_hook", "stop_if_higher_hook",
"stop_if_lower_hook",
"stop_if_no_decrease_hook", "stop_if_no_increase_hook"
]
for symbol in api_symbols:
text = contrib_alias + "estimator." + symbol
expected_text = "tf.estimator.experimental." + symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_rnn_cell(self):
api_symbols = ["RNNCell", "BasicLSTMCell", "BasicRNNCell", "GRUCell",
"LSTMCell", "MultiRNNCell"]
for symbol in api_symbols:
text = "tf.contrib.rnn." + symbol
expected_text = "tf.compat.v1.nn.rnn_cell." + symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_rnn_function(self):
api_symbols = ["static_rnn", "static_state_saving_rnn",
"static_bidirectional_rnn"]
for symbol in api_symbols:
text = "tf.contrib.rnn." + symbol
expected_text = "tf.compat.v1.nn." + symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_summary_generic(self):
text = "tf.contrib.summary.generic('foo', myval, meta, 'fam', 42)"
expected = ("tf.compat.v2.summary.write(tag='foo', data=myval, "
"metadata=meta, step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
# Arg errors come in alphabetical order of arguments, not appearance order.
self.assertIn("'family' argument", errors[0])
self.assertIn("'name' argument", errors[1])
self.assertIn("tf.compat.v2.summary.*", errors[2])
def test_contrib_summary_audio(self):
text = "tf.contrib.summary.audio('foo', myval, 44100, 3, 'fam', 42)"
expected = ("tf.compat.v2.summary.audio(name='foo', data=myval, "
"sample_rate=44100, max_outputs=3, step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'family' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_histogram(self):
text = "tf.contrib.summary.histogram('foo', myval, 'fam', 42)"
expected = ("tf.compat.v2.summary.histogram(name='foo', data=myval, "
"step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'family' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_image(self):
text = "tf.contrib.summary.image('foo', myval, red, 3, 'fam', 42)"
expected = ("tf.compat.v2.summary.image(name='foo', data=myval, "
"max_outputs=3, step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'bad_color' argument", errors[0])
self.assertIn("'family' argument", errors[1])
self.assertIn("tf.compat.v2.summary.*", errors[2])
def test_contrib_summary_scalar(self):
text = "tf.contrib.summary.scalar('foo', myval, 'fam', 42)"
expected = ("tf.compat.v2.summary.scalar(name='foo', data=myval, "
"step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'family' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_generic_nostep(self):
text = "tf.contrib.summary.generic('foo', myval)"
expected = ("tf.compat.v2.summary.write(tag='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'name' argument", errors[0])
self.assertIn("'step' argument", errors[1])
self.assertIn("tf.compat.v2.summary.*", errors[2])
def test_contrib_summary_audio_nostep(self):
text = "tf.contrib.summary.audio('foo', myval, 44100)"
expected = ("tf.compat.v2.summary.audio(name='foo', data=myval, "
"sample_rate=44100, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_histogram_nostep(self):
text = "tf.contrib.summary.histogram('foo', myval)"
expected = ("tf.compat.v2.summary.histogram(name='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_image_nostep(self):
text = "tf.contrib.summary.image('foo', myval)"
expected = ("tf.compat.v2.summary.image(name='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_scalar_nostep(self):
text = "tf.contrib.summary.scalar('foo', myval)"
expected = ("tf.compat.v2.summary.scalar(name='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_graph(self):
text = "tf.contrib.summary.graph(my_graph)"
_, _, errors, _ = self._upgrade(text)
expected_error = "tf.compat.v2.summary.trace"
self.assertIn(expected_error, errors[0])
def test_contrib_summary_import_event(self):
text = "tf.contrib.summary.import_event(my_event)"
_, _, errors, _ = self._upgrade(text)
expected_error = "tf.compat.v2.summary.experimental.write_raw_pb"
self.assertIn(expected_error, errors[0])
def test_contrib_summary_flush(self):
text = "tf.contrib.summary.flush(writer=foo)"
expected = "tf.compat.v2.summary.flush(writer=foo)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_summary_create_file_writer(self):
text = ("tf.contrib.summary.create_file_writer('my_logdir', 0, 1000, "
"'.foo', 'shared-name')")
expected = ("tf.compat.v2.summary.create_file_writer(logdir='my_logdir', "
"max_queue=0, flush_millis=1000, filename_suffix='.foo')")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'name' argument", errors[0])
self.assertIn("no longer re-uses existing event files", errors[1])
def test_contrib_summary_always_record_summaries(self):
text = "tf.contrib.summary.always_record_summaries()"
expected = "tf.compat.v2.summary.record_if(True)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_summary_never_record_summaries(self):
text = "tf.contrib.summary.never_record_summaries()"
expected = "tf.compat.v2.summary.record_if(False)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_summary_record_summaries_every_n_global_steps(self):
text = "tf.contrib.summary.record_summaries_every_n_global_steps(10)"
_, _, errors, _ = self._upgrade(text)
expected_error = "replaced by a call to tf.compat.v2.summary.record_if()"
self.assertIn(expected_error, errors[0])
def test_contrib_summary_all_summary_ops(self):
text = "tf.contrib.summary.all_summary_ops()"
expected = "tf.compat.v1.summary.all_v2_summary_ops()"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_summary_full_example(self):
deindent = lambda n, s: "\n".join(line[n:] for line in s.split("\n"))
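    # deindent(n, s) strips the first n characters from every line of s, e.g.
    # deindent(4, "    a\n    b") == "a\nb"; it undoes the indentation of the
    # triple-quoted snippets below.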
text = deindent(4, """
import tensorflow as tf
tf.enable_eager_execution()
writer = tf.contrib.summary.create_file_writer(
"/tmp/migration_test", flush_millis=1000)
with writer.as_default(), tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("loss", 0.42)
tf.contrib.summary.histogram("weights", [1.0, 2.0], step=7)
tf.contrib.summary.flush()
""")
expected = deindent(4, """
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
writer = tf.compat.v2.summary.create_file_writer(
logdir="/tmp/migration_test", flush_millis=1000)
with writer.as_default(), tf.compat.v2.summary.record_if(True):
tf.compat.v2.summary.scalar(name="loss", data=0.42, step=tf.compat.v1.train.get_or_create_global_step())
tf.compat.v2.summary.histogram(name="weights", data=[1.0, 2.0], step=7)
tf.compat.v2.summary.flush()
""")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_summary_api_warning(self):
text = "tf.summary.scalar('foo', 42)"
_, report, _, _ = self._upgrade(text)
expected_info = "TF 1.x summary API cannot be automatically migrated"
self.assertIn(expected_info, report)
def test_avg_pool_2d(self):
text = "tf.nn.avg_pool(value=4)"
expected_text = "tf.nn.avg_pool2d(input=4)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_saved_model_load(self):
text = "tf.saved_model.load(sess, ['foo_graph'])"
expected = "tf.compat.v1.saved_model.load(sess, ['foo_graph'])"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_saved_model_load_v2(self):
text = "tf.saved_model.load_v2('/tmp/blah')"
expected = "tf.compat.v2.saved_model.load('/tmp/blah')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_app_flags(self):
text = "flags = tf.app.flags"
expected = "flags = tf.compat.v1.app.flags"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_uniform_unit_scaling_initializer(self):
text = "tf.uniform_unit_scaling_initializer(0.5)"
expected_text = ("tf.compat.v1.keras.initializers.VarianceScaling("
"scale=0.5, distribution=\"uniform\")")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.initializers.uniform_unit_scaling(0.5)"
expected_text = ("tf.compat.v1.keras.initializers.VarianceScaling("
"scale=0.5, distribution=\"uniform\")")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_name_scope(self):
text = "tf.name_scope(None, default_name, [some, values])"
expected_text = "tf.name_scope(name=default_name)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.name_scope(default_name=default_name, values=stuff)"
expected_text = "tf.name_scope(name=default_name)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.name_scope(name=n, default_name=d, values=s)"
expected_text = "tf.compat.v1.name_scope(name=n, default_name=d, values=s)"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("`name` passed to `name_scope`", report)
text = "tf.name_scope(name=None, values=stuff)"
_, _, errors, _ = self._upgrade(text)
self.assertIn("name_scope call with neither name nor default_name",
errors[0])
@parameterized.parameters(
# Rename parameter: delimiter -> sep and add .to_sparse()
["tf.string_split('test', delimiter=' ')",
"tf.strings.split(input='test', sep=' ').to_sparse()"],
# Rename parameter: source -> input
["tf.strings.split(source='test1')",
"tf.strings.split(input='test1').to_sparse()"],
# Use compat.v1 for skip_empty parameter.
["tf.string_split('test', ' ', True)",
"tf.compat.v1.string_split(source='test', sep=' ', skip_empty=True)"],
["tf.string_split('test', ' ', skip_empty=False)",
"tf.strings.split(input='test', sep=' ').to_sparse()"],
# Split behavior for sep=None changed. (In particular, it now splits on
# all whitespace, not just the space character)
["tf.string_split(x)",
"tf.compat.v1.string_split(source=x)"],
# Split behavior for sep='' changed:
["tf.string_split(x, '')",
"tf.strings.bytes_split(input=x).to_sparse()"],
["tf.string_split(x, sep='')",
"tf.strings.bytes_split(input=x).to_sparse()"],
["tf.string_split(x, delimiter='')",
"tf.strings.bytes_split(input=x).to_sparse()"],
["tf.string_split(x, '', result_type='RaggedTensor')",
"tf.strings.bytes_split(input=x)"],
# If sep is a variable, we can't tell if it's empty:
["tf.string_split(x, sep)",
"tf.compat.v1.string_split(source=x, sep=sep)"],
# If sep is a non-empty string literal, then we don't need compat.v1.
["tf.string_split(x, 'non-empty-sep')",
"tf.strings.split(input=x, sep='non-empty-sep').to_sparse()"],
# Add to_sparse unless result_type is RaggedTensor:
["tf.string_split(x, ' ')",
"tf.strings.split(input=x, sep=' ').to_sparse()"],
["tf.string_split(x, ' ', result_type='SparseTensor')",
"tf.strings.split(input=x, sep=' ').to_sparse()"],
["tf.string_split(x, ' ', result_type='RaggedTensor')",
"tf.strings.split(input=x, sep=' ')"],
["tf.string_split(x, ' ', result_type=x)",
"tf.compat.v1.string_split(source=x, sep=' ', result_type=x)"],
) # pyformat: disable
# TODO(b/129398290)
def DISABLED_test_string_split(self, text, expected_text):
"""Tests for transforming from tf.string_split."""
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
@parameterized.parameters(
# Add to_sparse unless result_type is RaggedTensor:
["tf.strings.split(x, sep)",
"tf.strings.split(x, sep).to_sparse()"],
["tf.strings.split(x, sep, result_type='SparseTensor')",
"tf.strings.split(x, sep).to_sparse()"],
["tf.strings.split(x, sep, result_type='RaggedTensor')",
"tf.strings.split(x, sep)"],
["tf.strings.split(x, sep, result_type=x)",
"tf.compat.v1.strings.split(x, sep, result_type=x)"],
) # pyformat: disable
def test_strings_split(self, text, expected_text):
"""Tests for transforming from tf.strings.split."""
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_sdca_to_raw_ops(self):
text = "tf.train.sdca_fprint(input_tensor)"
expected_text = "tf.raw_ops.SdcaFprint(input=input_tensor)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.train.sdca_fprint(input, name=n)"
expected_text = "tf.raw_ops.SdcaFprint(input=input, name=n)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.train.sdca_shrink_l1(w, l, ll)"
expected_text = "tf.raw_ops.SdcaShrinkL1(weights=w, l1=l, l2=ll)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = (
"tf.train.sdca_optimizer(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o)")
expected_text = (
"tf.raw_ops.SdcaOptimizer(sparse_example_indices=a, "
"sparse_feature_indices=b, sparse_feature_values=c, dense_features=d, "
"example_weights=e, example_labels=f, sparse_indices=g, "
"sparse_weights=h, dense_weights=i, example_state_data=j, loss_type=k, "
"l1=l, l2=m, num_loss_partitions=n, num_inner_iterations=o)")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_to_addons_move(self):
small_mapping = {
"tf.contrib.layers.poincare_normalize":
"tfa.layers.PoincareNormalize",
"tf.contrib.layers.maxout":
"tfa.layers.Maxout",
"tf.contrib.layers.group_norm":
"tfa.layers.GroupNormalization",
"tf.contrib.layers.instance_norm":
"tfa.layers.InstanceNormalization",
}
for symbol, replacement in small_mapping.items():
text = "{}('stuff', *args, **kwargs)".format(symbol)
_, report, _, _ = self._upgrade(text)
self.assertIn(replacement, report)
def testXlaExperimental(self):
text = "tf.xla.experimental.jit_scope(0)"
expected_text = "tf.xla.experimental.jit_scope(0)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.xla.experimental.compile(0)"
expected_text = "tf.xla.experimental.compile(0)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testNnErosion2d(self):
text = "tf.nn.erosion2d(v, k, s, r, p)"
expected_text = "tf.nn.erosion2d(v, k, s, r, p, data_format='NHWC')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testNnDilation2d(self):
text = "tf.nn.dilation2d(v, k, s, r, p)"
expected_text = "tf.nn.dilation2d(v, k, s, r, p, data_format='NHWC')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testPywrapTensorflowWarning(self):
text = "tf.pywrap_tensorflow.foo()"
expected = "tf.pywrap_tensorflow.foo()"
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("`tf.pywrap_tensorflow` will not be distributed", errors[0])
def testKerasSaveModelFormat(self):
text = "tf.keras.models.save_model(model, path)"
expected_text = "tf.keras.models.save_model(model, path, save_format='h5')"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertNotIn(
"saves to the Tensorflow SavedModel format by default", report)
_, report, _, _ = self._upgrade("model.save(path)")
self.assertIn(
"saves to the Tensorflow SavedModel format by default", report)
def test_distribute_strategy(self):
text = "tf.contrib.distribute.CrossDeviceOps()"
expected = "tf.distribute.CrossDeviceOps()"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
text = "tf.contrib.distribute.MirroredStrategy"
expected = "tf.contrib.distribute.MirroredStrategy"
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("migrated to tf.distribute.MirroredStrategy", errors[0])
text = "tf.distribute.MirroredStrategy"
expected = "tf.distribute.MirroredStrategy"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("tf.distribute.MirroredStrategy API has changed", report)
self.assertIn("make_dataset_iterator->experimental_distribute_dataset",
report)
text = "tf.contrib.distribute.TPUStrategy"
expected = "tf.contrib.distribute.TPUStrategy"
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("migrated to tf.distribute.TPUStrategy",
errors[0])
text = "tf.contrib.distribute.foo"
expected = "tf.contrib.distribute.foo"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("tf.contrib.distribute.* have been migrated", report)
def test_decode_raw(self):
text = "tf.io.decode_raw(bytes=[1,2,3], output_dtype=tf.int32)"
expected_text = (
"tf.io.decode_raw(input_bytes=[1,2,3], output_dtype=tf.int32)")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testRecomputeGrad(self):
text = "tf.contrib.layers.recompute_grad()"
expected = "tf.recompute_grad()"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_load_variable(self):
text = "tf.contrib.framework.load_variable('a')"
expected_text = (
"tf.train.load_variable('a')")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.contrib.framework.load_variable(checkpoint_dir='a')"
expected_text = (
"tf.train.load_variable(ckpt_dir_or_file='a')")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_import_rename_analysis(self):
old_symbol = "tf.conj(a)"
new_symbol = "tf.math.conj(a)"
import_header = "import tensorflow as tf\n"
text = import_header + old_symbol
expected_text = "import tensorflow.compat.v2 as tf\n" + new_symbol
_, unused_report, unused_errors, new_text = self._upgrade(
text, import_rename=True)
self.assertEqual(new_text, expected_text)
import_header = "import tensorflow as tf, other_import as y\n"
text = import_header + old_symbol
new_import_header = "import tensorflow.compat.v2 as tf, other_import as y\n"
expected_text = new_import_header + new_symbol
_, unused_report, unused_errors, new_text = self._upgrade(
text, import_rename=True)
self.assertEqual(new_text, expected_text)
import_header = ("import tensorflow as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
text = import_header + old_symbol
expected_header = ("import tensorflow.compat.v2 as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
expected_text = expected_header + new_symbol
_, _, _, new_text = self._upgrade(text, import_rename=True)
self.assertEqual(new_text, expected_text)
import_header = ("import tensorflow.compat.v1 as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
text = import_header + old_symbol
expected_header = ("import tensorflow.compat.v2 as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
expected_text = expected_header + new_symbol
_, _, _, new_text = self._upgrade(
text, import_rename=True, upgrade_compat_v1_import=True)
self.assertEqual(new_text, expected_text)
import_header = ("import tensorflow.compat.v1 as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
text = import_header + old_symbol
expected_header = ("import tensorflow as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
expected_text = expected_header + new_symbol
_, _, _, new_text = self._upgrade(
text, import_rename=False, upgrade_compat_v1_import=True)
self.assertEqual(new_text, expected_text)
import_header = "from tensorflow import foo\n"
text = import_header + old_symbol
expected_text = "from tensorflow.compat.v2 import foo\n" + new_symbol
_, unused_report, unused_errors, new_text = self._upgrade(
text, import_rename=True)
self.assertEqual(new_text, expected_text)
import_header = "from tensorflow import *\n"
text = import_header + old_symbol
expected_text = "from tensorflow.compat.v2 import *\n" + new_symbol
_, unused_report, unused_errors, new_text = self._upgrade(
text, import_rename=True)
self.assertEqual(new_text, expected_text)
import_header = "from tensorflow.foo import bar\n"
text = import_header + old_symbol
expected_text = "from tensorflow.compat.v2.foo import bar\n" + new_symbol
_, unused_report, unused_errors, new_text = self._upgrade(
text, import_rename=True)
self.assertEqual(new_text, expected_text)
import_header = ("from tensorflow import foo as tf\n"
"from tensorflow.compat import v1 as tf_v1\n"
"from tensorflow.compat import v2 as tf_v2\n")
text = import_header + old_symbol
expected_header = ("from tensorflow.compat.v2 import foo as tf\n"
"from tensorflow.compat import v1 as tf_v1\n"
"from tensorflow.compat import v2 as tf_v2\n")
expected_text = expected_header + new_symbol
_, _, _, new_text = self._upgrade(text, import_rename=True)
self.assertEqual(new_text, expected_text)
def test_import_analysis(self):
old_symbol = "tf.conj(a)"
new_symbol = "tf.math.conj(a)"
# We upgrade the base un-versioned tensorflow aliased as tf
import_header = "import tensorflow as tf\n"
text = import_header + old_symbol
expected_text = import_header + new_symbol
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
import_header = ("import tensorflow as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
text = import_header + old_symbol
expected_text = import_header + new_symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
    # We don't handle unaliased tensorflow imports currently,
    # so the upgrade script should log errors for them.
import_header = "import tensorflow\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("unaliased `import tensorflow`", "\n".join(errors))
# Upgrading explicitly-versioned tf code is unsafe, but we don't
# need to throw errors when we detect explicitly-versioned tf.
import_header = "import tensorflow.compat.v1 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v1` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "from tensorflow.compat import v1 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v1` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "from tensorflow.compat import v1 as tf, v2 as tf2\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v1` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "import tensorflow.compat.v2 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v2` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "from tensorflow.compat import v1 as tf1, v2 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v2` was directly imported as `tf`",
report)
self.assertEmpty(errors)
def test_api_spec_reset_between_files(self):
for old_symbol, new_symbol in [
("tf.conj(a)", "tf.math.conj(a)"),
("tf.to_int32(x)", "tf.cast(x, dtype=tf.int32)")]:
## Test that the api spec is reset in between files:
import_header = "import tensorflow.compat.v2 as tf\n"
text_a = import_header + old_symbol
expected_text_a = import_header + old_symbol
text_b = old_symbol
expected_text_b = new_symbol
results = self._upgrade_multiple([text_a, text_b])
result_a, result_b = results[0], results[1]
self.assertEqual(result_a[3], expected_text_a)
self.assertEqual(result_b[3], expected_text_b)
def test_model_to_estimator_checkpoint_warning(self):
text = "tf.keras.estimator.model_to_estimator(model)"
_, report, _, _ = self._upgrade(text)
expected_info = "will save object-based checkpoints"
self.assertIn(expected_info, report)
def test_keras_experimental_export_warning(self):
text = "tf.keras.experimental.export_saved_model"
_, report, _, _ = self._upgrade(text)
expected_info = "Please use model.save"
self.assertIn(expected_info, report)
class TestUpgradeFiles(test_util.TensorFlowTestCase):
def testInplace(self):
"""Check to make sure we don't have a file system race."""
temp_file = tempfile.NamedTemporaryFile("w", delete=False)
original = "tf.conj(a)\n"
upgraded = "tf.math.conj(a)\n"
temp_file.write(original)
temp_file.close()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
upgrader.process_file(temp_file.name, temp_file.name)
self.assertAllEqual(open(temp_file.name).read(), upgraded)
os.unlink(temp_file.name)
def testInplaceNoOutputChangeOnErrorHandling(self):
"""In place file should not be modified when parsing error is handled."""
temp_file = tempfile.NamedTemporaryFile("w", delete=False)
original = "print 'a' \n"
upgraded = "print 'a' \n"
temp_file.write(original)
temp_file.close()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
upgrader.process_file(
temp_file.name, temp_file.name, no_change_to_outfile_on_error=True)
self.assertAllEqual(open(temp_file.name).read(), upgraded)
os.unlink(temp_file.name)
def testInplaceEmptyOutputOnError(self):
"""In place file becomes empty when parsing error is not handled."""
temp_file = tempfile.NamedTemporaryFile("w", delete=False)
original = "print 'a' \n"
upgraded = ""
temp_file.write(original)
temp_file.close()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
upgrader.process_file(temp_file.name, temp_file.name)
self.assertAllEqual(open(temp_file.name).read(), upgraded)
os.unlink(temp_file.name)
if __name__ == "__main__":
test_lib.main()
|
davidzchen/tensorflow
|
tensorflow/tools/compatibility/tf_upgrade_v2_test.py
|
Python
|
apache-2.0
| 101,974
|
[
"Gaussian"
] |
989eb0d149db9847a0395b0ba9a03041d42693061d66ed31cc2d171e7876cc06
|
# -*- coding: utf-8 -*-
import os
import tempfile
import types
import json
from mock import patch
from nose.tools import eq_
from helper import TestCase
import appvalidator.constants
from appvalidator.errorbundle import ErrorBundle
from appvalidator.specs.webapps import WebappSpec
import appvalidator.webapp
class TestWebappAccessories(TestCase):
"""
Test that helper functions for webapp manifests work as they are intended
to.
"""
def test_path(self):
"""Test that paths are tested properly for allowances."""
s = WebappSpec("{}", ErrorBundle())
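        # A spec built from an empty manifest and a fresh ErrorBundle is enough
        # here, since only the _path_valid() helper is exercised.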
eq_(s._path_valid("*"), False)
eq_(s._path_valid("*", can_be_asterisk=True), True)
eq_(s._path_valid("/foo/bar"), False)
eq_(s._path_valid("/foo/bar", can_be_absolute=True), True)
eq_(s._path_valid("//foo/bar"), False)
eq_(s._path_valid("//foo/bar", can_be_absolute=True), False)
eq_(s._path_valid("//foo/bar", can_be_relative=True), False)
eq_(s._path_valid("http://asdf/"), False)
eq_(s._path_valid("https://asdf/"), False)
eq_(s._path_valid("ftp://asdf/"), False)
eq_(s._path_valid("http://asdf/", can_have_protocol=True), True)
eq_(s._path_valid("https://asdf/", can_have_protocol=True), True)
# No FTP for you!
eq_(s._path_valid("ftp://asdf/", can_have_protocol=True), False)
eq_(s._path_valid("data:asdf"), False)
eq_(s._path_valid("data:asdf", can_be_data=True), True)
class WebappBaseTestCase(TestCase):
def setUp(self):
super(WebappBaseTestCase, self).setUp()
self.listed = False
descr = "Exciting Open Web development action!"
descr += (1024 - len(descr)) * "_"
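        # Pad the description out to exactly 1024 characters, presumably to sit
        # right at the maximum length the spec accepts.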
self.data = {
"version": "1.0",
"name": "MozBall",
"description": descr,
"icons": {
"32": "/img/icon-32.png",
"128": "/img/icon-128.png",
},
"developer": {
"name": "Mozilla Labs",
"url": "http://mozillalabs.com"
},
"installs_allowed_from": [
"https://appstore.mozillalabs.com",
"HTTP://mozilla.com/AppStore"
],
"launch_path": "/index.html",
"locales": {
"es": {
"name": "Foo Bar",
"description": "¡Acción abierta emocionante del desarrollo",
"developer": {
"url": "http://es.mozillalabs.com/"
}
},
"it": {
"description": "Azione aperta emozionante di sviluppo di!",
"developer": {
"url": "http://it.mozillalabs.com/"
}
}
},
"default_locale": "en",
"screen_size": {
"min_width": "600",
"min_height": "300"
},
"required_features": [
"touch", "geolocation", "webgl"
],
"orientation": "landscape",
"fullscreen": "true",
"type": "web",
"precompile": [
"game.js",
"database.js"
],
}
self.resources = [("app_type", "web")]
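    # Each test below starts from this known-good manifest and mutates
    # self.data and/or self.resources to provoke one specific validation
    # outcome.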
def make_privileged(self):
self.resources = [("app_type", "privileged"),
("packaged", True)]
self.data["type"] = "privileged"
def analyze(self):
"""Run the webapp tests on the file."""
self.detected_type = appvalidator.constants.PACKAGE_WEBAPP
self.setup_err()
for resource, value in self.resources:
self.err.save_resource(resource, value)
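        # Serialize the manifest (a raw string, or a dict dumped to JSON) to a
        # temporary file so detect_webapp() can parse it from disk, then remove
        # the file afterwards.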
with tempfile.NamedTemporaryFile(delete=False) as t:
if isinstance(self.data, types.StringTypes):
t.write(self.data)
else:
t.write(json.dumps(self.data))
name = t.name
appvalidator.webapp.detect_webapp(self.err, name)
os.unlink(name)
class TestWebapps(WebappBaseTestCase):
def test_pass(self):
"""Test that a bland webapp file throws no errors."""
self.analyze()
self.assert_silent()
output = json.loads(self.err.render_json())
assert "manifest" in output and output["manifest"]
def test_bom(self):
"""Test that a plain webapp with a BOM won't throw errors."""
self.setup_err()
appvalidator.webapp.detect_webapp(
self.err, "tests/resources/unicodehelper/utf8_webapp.json")
self.assert_silent()
def test_fail_parse(self):
"""Test that invalid JSON is reported."""
self.data = "}{"
self.analyze()
self.assert_failed(with_errors=True)
def test_missing_required(self):
"""Test that missing the name element is a bad thing."""
del self.data["name"]
self.analyze()
self.assert_failed(with_errors=True)
def test_invalid_name(self):
"""Test that the name element is a string."""
self.data["name"] = ["foo", "bar"]
self.analyze()
self.assert_failed(with_errors=True)
def test_long_name(self):
"""Test that long names are flagged for truncation in Gaia."""
self.data["name"] = "This is a long name."
self.analyze()
self.assert_failed(with_warnings=True)
def test_long_name_with_locale(self):
"""Test that long localized names are flagged for truncation in
Gaia."""
self.data["locales"]["es"]["name"] = "This is a long name."
self.analyze()
self.assert_failed(with_warnings=True)
def test_role(self):
"""Test that app may contain role element."""
self.data["role"] = "input"
self.analyze()
self.assert_silent()
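    # The langpack tests below all follow the same shape: build a manifest with
    # role "langpack", tweak one field of languages-target / languages-provided,
    # and assert either silence or the exact error id the spec walker should
    # emit.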
def test_langpack_role_need_languages_target(self):
"""Test that a language-target is needed for langpacks."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://blah.gaiamobile.org/manifest.webapp": "/de/blah",
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
}
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "missing_req_cond", ))
def test_langpack_languages_target_version_value(self):
"""Test that language-target version is in major.minor format."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.5"
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://blah.gaiamobile.org/manifest.webapp": "/de/blah",
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
}
}
},
})
self.analyze()
self.assert_silent()
def test_langpack_languages_target_version_value_suffix(self):
"""Test that language-target version can have a dash suffix."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.5-xyz"
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://blah.gaiamobile.org/manifest.webapp": "/de/blah",
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
}
}
},
})
self.analyze()
self.assert_silent()
def test_langpack_invalid_languages_target_type(self):
"""Test language-target type."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://blah.gaiamobile.org/manifest.webapp": "/de/blah",
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
}
}
},
"languages-target": ["2.2"], # Wrong type.
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "bad_type", ))
def test_langpack_invalid_languages_target_wrong_version_type(self):
"""Test that language-target version number has the correct type."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": 2.2 # Wrong type.
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://blah.gaiamobile.org/manifest.webapp": "/de/blah",
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
}
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "bad_type", ))
def test_langpack_invalid_languages_target_wrong_version_value(self):
"""Test that language-target version number has the correct value."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2" # Wrong value.
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://blah.gaiamobile.org/manifest.webapp": "/de/blah",
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
}
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "value_pattern_fail", ))
def test_langpack_invalid_languages_target(self):
"""Test that language-target manifest has the correct value."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://my.manifest.webapp": "2.2" # Manifest is incorrect.
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://blah.gaiamobile.org/manifest.webapp": "/de/blah",
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
}
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "not_allowed", ))
def test_langpack_role_need_languages_provided(self):
"""Test that a language-provided is needed for langpacks."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.2"
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "missing_req_cond", ))
def test_langpack_invalid_languages_provided_should_not_be_empty(self):
"""Test that language-provided is not empty."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.2"
},
"languages-provided": {}
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "empty", ))
def test_langpack_invalid_languages_provided_language_should_be_dict(self):
"""Test that language-provided children are dicts."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.2"
},
"languages-provided": {
"de": []
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "bad_type", ))
def test_langpack_invalid_languages_provided_need_revision(self):
"""Test that language-provided revision is present."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.2"
},
"languages-provided": {
"de": {
"name": "Deutsch",
"apps": {
"app://blah.gaiamobile.org/manifest.webapp": "/de/blah",
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
}
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "missing_req", ))
def test_langpack_invalid_languages_provided_need_apps(self):
"""Test that language-provided apps is present."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.2"
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "missing_req", ))
def test_langpack_invalid_languages_provided_apps(self):
"""Test that language-provided apps should be a dict."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.2"
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": ["app://blah.gaiamobile.org/manifest.webapp"]
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "bad_type", ))
def test_langpack_invalid_languages_provided_apps_empty(self):
"""Test that language-provided apps should be non-empty dict."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.2"
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
}
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "empty", ))
def test_langpack_invalid_languages_provided_revision(self):
"""Test that language-provided revision should be an int."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.2"
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": "201411051234", # Wrong type, should be a int.
"apps": {
"app://blah.gaiamobile.org/manifest.webapp": "/de/blah",
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
}
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "bad_type", ))
def test_valid_langpack(self):
"""Test that a valid langpack passes validation."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.2"
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://blah.gaiamobile.org/manifest.webapp": "/de/blah",
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
}
}
},
})
self.analyze()
self.assert_silent()
def test_valid_langpack_30(self):
"""Test that a valid langpack for FxOS 3.0 passes validation."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "3.0"
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://blah.gaiamobile.org/manifest.webapp": "/de/blah",
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
}
}
},
})
self.analyze()
self.assert_silent()
def test_languages_target_invalid_for_webapps(self):
"""Test that language-target is invalid for non-langpack webapps."""
self.data.update({
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.2"
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(
("spec", "webapp", "languages_target_langpacks", ))
def test_languages_provided_invalid_for_webapps(self):
"""Test that language-provided is invalid for non-langpack webapps."""
self.data.update({
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://blah.gaiamobile.org/manifest.webapp": "/de/blah",
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
}
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(
("spec", "webapp", "languages_provided_langpacks", ))
def test_langpack_valid_speechdata(self):
"""Test that valid speech-data passes validation."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.5"
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
},
"speech-data": [
"/de/speech-data/feat.params",
"/de/speech-data/de.dic",
"/de/speech-data/de.dic.dmp",
"/de/speech-data/mdef",
"/de/speech-data/means",
"/de/speech-data/mixture_weights",
"/de/speech-data/noisedict",
"/de/speech-data/sendump",
"/de/speech-data/transition_matrices",
"/de/speech-data/variances"
]
}
},
})
self.analyze()
self.assert_silent()
def test_langpack_invalid_speechdata_empty(self):
"""Test that speech-data should not be empty."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.5"
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
},
"speech-data": []
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "empty", ))
def test_langpack_invalid_speechdata_type(self):
"""Test that speech-data should be an array."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.5"
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
},
"speech-data": {}
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "bad_type", ))
def test_langpack_invalid_speechdata_descendants_empty(self):
"""Test that speech-data descendants should not be empty."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.5"
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
},
"speech-data": [
"",
"/de/speech-data/feat.params",
]
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "empty", ))
def test_langpack_invalid_speechdata_descendants_type(self):
"""Test that speech-data descendants should be strings."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.5"
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
},
"speech-data": [
1
]
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "bad_type", ))
def test_invalid_role(self):
"""Test that app may not contain invalid role element."""
self.data["role"] = "hello"
self.analyze()
self.assert_failed(with_errors=True)
def test_no_homescreen_hosted(self):
"Homescreens must not be hosted apps."
self.data["role"] = "homescreen"
self.analyze()
self.assert_failed(with_errors=True)
def test_homescreen_packaged(self):
"Homescreens must be packaged apps."
self.data["role"] = "homescreen"
self.resources.append(("packaged", True))
self.analyze()
self.assert_silent()
def test_empty_name(self):
"""Test that empty names are not allowed"""
self.data["name"] = None
self.analyze()
self.assert_failed(with_errors=True)
def test_maxlengths(self):
"""Test that certain elements are capped in length."""
self.data["name"] = "%" * 129
self.analyze()
self.assert_failed(with_errors=True)
def test_invalid_keys(self):
"""Test that unknown elements are flagged"""
self.data["foobar"] = "hello"
self.analyze()
self.assert_failed(with_warnings=True)
def test_warn_extra_keys(self):
"""Test that extra keys are flagged."""
self.data["locales"]["es"]["foo"] = "hello"
self.analyze()
self.assert_failed(with_warnings=True)
def test_icons_not_dict(self):
"""Test that the icons property is a dictionary."""
self.data["icons"] = ["data:foo/bar.png"]
self.analyze()
self.assert_failed(with_errors=True)
def test_icons_empty(self):
"""Test that no icons doesn't cause a traceback."""
self.data["icons"] = {}
self.analyze()
def test_icons_size(self):
"""Test that webapp icon sizes must be integers."""
self.data["icons"]["foo"] = "/foo.png"
self.analyze()
self.assert_failed(with_errors=True)
def test_icons_data_url(self):
"""Test that webapp icons can be data URLs."""
self.data["icons"]["128"] = "data:foo/bar.png"
self.analyze()
self.assert_silent()
def test_icons_relative_url(self):
"""Test that webapp icons cannot be relative URLs."""
self.data["icons"]["128"] = "foo/bar"
self.analyze()
self.assert_silent()
def test_icons_absolute_url(self):
"""Test that webapp icons can be absolute URLs."""
def test_icon(self, icon):
self.setUp()
self.data["icons"]["128"] = icon
self.analyze()
self.assert_silent()
for icon in ['/foo/bar', 'http://foo.com/bar', 'https://foo.com/bar']:
yield test_icon, self, icon
def test_icons_has_min_selfhosted(self):
del self.data["icons"]["128"]
self.analyze()
self.assert_silent()
def test_icons_has_min_listed(self):
self.listed = True
self.data["installs_allowed_from"] = \
appvalidator.constants.DEFAULT_WEBAPP_MRKT_URLS
del self.data["icons"]["128"]
self.analyze()
self.assert_failed(with_errors=True)
def test_no_locales(self):
"""Test that locales are not required."""
del self.data["locales"]
self.analyze()
self.assert_silent()
def test_no_default_locale_no_locales(self):
"""Test that locales are not required if no default_locale."""
del self.data["default_locale"]
del self.data["locales"]
self.analyze()
self.assert_silent()
def test_no_default_locale(self):
"""Test that locales require default_locale."""
del self.data["default_locale"]
self.analyze()
self.assert_failed(with_errors=True)
def test_invalid_locale_keys(self):
"""Test that locales only contain valid keys."""
# Banned locale element.
self.data["locales"]["es"]["default_locale"] = "foo"
self.analyze()
self.assert_failed(with_warnings=True)
def test_invalid_locale_keys_missing(self):
"""Test that locales aren't missing any required elements."""
del self.data["locales"]["es"]["name"]
self.analyze()
self.assert_silent()
def test_installs_allowed_from_not_list(self):
"""Test that the installs_allowed_from path is a list."""
self.data["installs_allowed_from"] = "foobar"
self.analyze()
self.assert_failed(with_errors=True)
def test_bad_installs_allowed_from_path(self):
"""Test that the installs_allowed_from path is valid."""
self.data["installs_allowed_from"].append("foo/bar")
self.analyze()
self.assert_failed(with_errors=True)
def test_no_amo_installs_allowed_from(self):
"""Test that installs_allowed_from should include Marketplace."""
# self.data does not include a marketplace URL by default.
self.listed = True
self.analyze()
self.assert_failed(with_errors=True)
def test_amo_iaf(self):
"""Test that the various Marketplace URLs work."""
# Test that the Marketplace production URL is acceptable.
self.setUp()
orig_iaf = self.data["installs_allowed_from"]
def test_iaf(self, iaf, url):
self.setUp()
self.data["installs_allowed_from"] = iaf + [url]
self.analyze()
self.assert_silent()
for url in appvalidator.constants.DEFAULT_WEBAPP_MRKT_URLS:
yield test_iaf, self, orig_iaf, url
def test_iaf_wildcard(self):
"""Test that installs_allowed_from can contain a wildcard."""
self.listed = True
self.data["installs_allowed_from"].append("*")
self.analyze()
self.assert_silent()
def test_installs_allowed_from_protocol(self):
"""
Test that if the developer includes a URL in the `installs_allowed_from`
parameter that is a valid Marketplace URL but uses HTTP instead of
HTTPS, we flag it as using the wrong protocol and not as an invalid URL.
"""
self.listed = True
bad_url = appvalidator.constants.DEFAULT_WEBAPP_MRKT_URLS[0].replace(
"https", "http")
self.data["installs_allowed_from"] = (bad_url, )
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "webapp", "iaf_bad_mrkt_protocol", ))
def test_launch_path_packaged(self):
"""Test that the launch path is present in a packaged app."""
del self.data["launch_path"]
self.resources.append(('packaged', True))
self.analyze()
self.assert_failed(with_errors=True)
def test_launch_path_not_string(self):
"""Test that the launch path is a string."""
self.data["launch_path"] = [123]
self.analyze()
self.assert_failed(with_errors=True)
def test_bad_launch_path(self):
"""Test that the launch path is valid."""
self.data["launch_path"] = "data:asdf"
self.analyze()
self.assert_failed(with_errors=True)
def test_bad_launch_path_protocol(self):
"""Test that the launch path cannot have a protocol."""
self.data["launch_path"] = "http://foo.com/bar"
self.analyze()
self.assert_failed(with_errors=True)
def test_bad_launch_path_absolute(self):
"""Test that the launch path is absolute."""
self.data["launch_path"] = "/foo/bar"
self.analyze()
self.assert_silent()
def test_widget_deprecated(self):
"""Test that the widget property is deprecated."""
self.data["widget"] = {
"path": "/butts.html",
"width": 100,
"height": 200
}
self.analyze()
self.assert_failed(with_errors=True)
def test_dev_missing(self):
"""Test that the developer property cannot be absent."""
del self.data["developer"]
self.analyze()
self.assert_failed(with_errors=True)
def test_dev_not_dict(self):
"""Test that the developer property must be a dict."""
self.data["developer"] = "foo"
self.analyze()
self.assert_failed(with_errors=True)
def test_bad_dev_keys(self):
"""Test that the developer keys are present."""
del self.data["developer"]["name"]
self.analyze()
self.assert_failed(with_errors=True)
def test_bad_dev_url(self):
"""Test that the developer keys are correct."""
self.data["developer"]["url"] = "foo"
self.analyze()
self.assert_failed(with_errors=True)
def test_screen_size_missing(self):
"""Test that the 'screen_size' property can be absent."""
del self.data["screen_size"]
self.analyze()
self.assert_silent()
def test_screen_size_is_dict(self):
"""Test that the 'screen_size' property must be a dict."""
self.data["screen_size"] = "foo"
self.analyze()
self.assert_failed(with_errors=True)
def test_screen_size_contains_pair(self):
"""Test that 'screen_size' must contain at least one key/value pair."""
self.data["screen_size"] = {}
self.analyze()
self.assert_failed(with_errors=True)
def test_bad_screen_size_key(self):
"""Test that the 'screen_size' keys are correct."""
self.data["screen_size"]["max_width"] = "500"
self.analyze()
self.assert_failed(with_warnings=True)
def test_bad_screen_size_value(self):
"""Test that the 'screen_size' keys are correct."""
self.data["screen_size"]["min_width"] = "500px"
self.analyze()
self.assert_failed(with_errors=True)
def test_required_screen_size_missing(self):
"""Test that the 'screen_size' property can be absent."""
del self.data["screen_size"]
self.analyze()
self.assert_silent()
def test_required_features_is_list(self):
"""Test that the 'required_features' property must be a list."""
self.data["required_features"] = "fart"
self.analyze()
self.assert_failed(with_errors=True)
def test_required_features_missing(self):
"""Test that 'required_features' can be absent."""
del self.data["required_features"]
self.analyze()
self.assert_silent()
def test_required_features_empty(self):
"""Test that 'required_features' can be an empty list."""
self.data["required_features"] = []
self.analyze()
self.assert_silent()
def test_orientation_missing(self):
"""Test that the 'orientation' property can be absent."""
del self.data["orientation"]
self.analyze()
self.assert_silent()
def test_orientation_list(self):
"""Test that the 'orientation' property can be absent."""
self.data["orientation"] = ["portrait", "portrait-secondary"]
self.analyze()
self.assert_silent()
def test_orientation_is_string(self):
"""Test that the 'orientation' property must be a string."""
self.data["orientation"] = {}
self.analyze()
self.assert_failed(with_errors=True)
def test_orientation_cannot_be_empty(self):
"""Test that 'orientation' cannot be an empty string."""
self.data["orientation"] = ""
self.analyze()
self.assert_failed(with_errors=True)
def test_orientation_valid_value(self):
"""Test that 'orientation' must have a valid value."""
def test_orientation(self, orientation):
self.setUp()
self.data["orientation"] = orientation
self.analyze()
self.assert_silent()
for key in ("portrait", "landscape", "portrait-secondary",
"landscape-secondary", "portrait-primary",
"landscape-primary"):
yield test_orientation, self, key
def test_orientation_bad_value(self):
"""Test that 'orientation' cannot have an invalid value."""
self.data["orientation"] = "fart"
self.analyze()
self.assert_failed(with_errors=True)
def test_orientation_empty_list(self):
"""Test that 'orientation' cannot be an empty list."""
self.data["orientation"] = []
self.analyze()
self.assert_failed(with_errors=True)
def test_orientation_list_invalid(self):
"""Test that 'orientation' cannot be a list with invalid values."""
self.data["orientation"] = ["fart"]
self.analyze()
self.assert_failed(with_errors=True)
def test_orientation_list_mixed(self):
"""Test that 'orientation' cannot be a list with mixed values."""
self.data["orientation"] = ["portrait", "fart", "landscape"]
self.analyze()
self.assert_failed(with_errors=True)
def test_orientation_list_type(self):
"""Test that 'orientation' cannot be a list with non-strings."""
self.data["orientation"] = ["portrait", 4]
self.analyze()
self.assert_failed(with_errors=True)
def test_inputs_dict_valid(self):
"""Test with 'inputs' entries throw no errors."""
self.data['inputs'] = {
'input1': {
'name': 'Symbols',
'description': 'Symbols Virtual Keyboard',
'launch_path': '/input1.html',
'types': ['text']
},
'siri': {
'name': 'Voice Control',
'description': 'Voice Control Input',
'launch_path': '/vc.html',
'types': ['text', 'url']
}
}
self.analyze()
self.assert_silent()
def test_inputs_dict_empty(self):
"""Test that 'inputs' may not be empty dict."""
self.data['inputs'] = {}
self.analyze()
self.assert_failed(with_errors=True)
def test_inputs_dict_entry_missing_name(self):
"""Test that 'inputs' with an entry missing 'name'."""
self.data['inputs'] = {
'input1': {
'description': 'Symbols Virtual Keyboard',
'launch_path': '/input1.html',
'types': ['text']
}
}
self.analyze()
self.assert_failed(with_errors=True)
def test_inputs_dict_entry_missing_description(self):
"""Test that 'inputs' with an entry missing 'description'."""
self.data['inputs'] = {
'input1': {
'name': 'Symbols',
'launch_path': '/input1.html',
'types': ['text']
}
}
self.analyze()
self.assert_failed(with_errors=True)
def test_inputs_dict_entry_missing_launch_path(self):
"""Test that 'inputs' with an entry missing 'launch_path'."""
self.data['inputs'] = {
'input1': {
'name': 'Symbols',
'description': 'Symbols Virtual Keyboard',
'types': ['text']
}
}
self.analyze()
self.assert_failed(with_errors=True)
def test_inputs_dict_entry_missing_types(self):
"""Test that 'inputs' with an entry missing 'types'."""
self.data['inputs'] = {
'input1': {
'name': 'Symbols',
'description': 'Symbols Virtual Keyboard',
'launch_path': '/input1.html'
}
}
self.analyze()
self.assert_failed(with_errors=True)
def test_inputs_dict_entry_empty_types(self):
"""Test that 'inputs' with an entry with empty 'types'."""
self.data['inputs'] = {
'input1': {
'name': 'Symbols',
'description': 'Symbols Virtual Keyboard',
'launch_path': '/input1.html',
'types': []
}
}
self.analyze()
self.assert_failed(with_errors=True)
def test_inputs_dict_entry_invalid_types(self):
"""Test that 'inputs' with an entry with invalid 'types'."""
self.data['inputs'] = {
'input1': {
'name': 'Symbols',
'description': 'Symbols Virtual Keyboard',
'launch_path': '/input1.html',
'types': ['foo']
}
}
self.analyze()
self.assert_failed(with_errors=True)
def test_inputs_dict_entry_locales(self):
"""Test that 'inputs' with an localized entry."""
self.data['inputs'] = {
'input1': {
'name': 'Symbols',
'description': 'Symbols Virtual Keyboard',
'launch_path': '/input1.html',
'types': ['text'],
'locales': {
'es': {
'name': 'foo',
'description': 'bar'
}
}
}
}
self.analyze()
self.assert_silent()
def test_inputs_dict_entry_invalid_locales(self):
"""Test that 'inputs' with an localized entry but contain invalid element."""
self.data['inputs'] = {
'input1': {
'name': 'Symbols',
'description': 'Symbols Virtual Keyboard',
'launch_path': '/input1.html',
'types': ['text'],
'locales': {
'es': {
'name': 'foo',
'description': 'bar',
'foo': 'bar2'
}
}
}
}
self.analyze()
self.assert_failed(with_warnings=True)
def test_fullscreen_missing(self):
"""Test that the 'fullscreen' property can be absent."""
del self.data["fullscreen"]
self.analyze()
self.assert_silent()
def test_fullscreen_is_string(self):
"""Test that the 'fullscreen' property must be a string."""
self.data["fullscreen"] = {}
self.analyze()
self.assert_failed(with_errors=True)
def test_fullscreen_cannot_be_empty(self):
"""Test that 'fullscreen' cannot be an empty string."""
self.data["fullscreen"] = ""
self.analyze()
self.assert_failed(with_errors=True)
def test_fullscreen_valid_value(self):
"""Test that 'fullscreen' must have a valid value."""
def test_fullscreen(self, value):
self.setUp()
self.data["fullscreen"] = key
self.analyze()
self.assert_silent()
for key in ("true", "false", ):
yield test_fullscreen, self, key
def test_fullscreen_bad_value(self):
"""Test that 'fullscreen' cannot have an invalid value."""
self.data["fullscreen"] = "fart"
self.analyze()
self.assert_failed(with_errors=True)
def test_type_failed(self):
"""Test that the `type` element must be a recognized value."""
self.data["type"] = "foo"
self.analyze()
self.assert_failed(with_errors=True)
def test_type_valid(self):
"""Test that the `type` element doesn't fail with valid values."""
def wrap(self, value):
self.setUp()
self.resources.append(("packaged", value != "web"))
self.data["type"] = value
self.analyze()
self.assert_silent()
for key in ("web", "privileged", "certified", ):
yield wrap, self, key
def test_type_not_certified(self):
"""Test that certified apps cannot be listed in the marketplace."""
self.listed = True
self.data["type"] = "certified"
self.analyze()
self.assert_failed(with_errors=True)
def test_type_web_priv_fail(self):
"""Test that web apps cannot be privileged or certified."""
self.data["type"] = "web"
self.resources.append(("packaged", False))
self.analyze()
self.assert_silent()
def test_type_packaged_priv_fail(self):
"""Test that web apps cannot be privileged or certified."""
self.data["type"] = "privileged"
self.resources.append(("packaged", True))
self.analyze()
self.assert_silent()
###########
# Web activities are tested in tests/test_webapp_activity.py
###########
def test_act_root_type(self):
"""Test that the most basic web activity passes."""
self.data["activities"] = "wrong type"
self.analyze()
self.assert_failed(with_errors=True)
def test_version(self):
"""Test that the version matches the format that we require."""
def wrap(version, passes):
self.setUp()
self.data["version"] = version
self.analyze()
if passes:
self.assert_silent()
else:
self.assert_failed(with_errors=True)
yield wrap, "1.0", True
yield wrap, "1.0.1", True
yield wrap, "Poop", True
yield wrap, "1.0b", True
yield wrap, "*.*", True
yield wrap, "1.5-alpha", True
yield wrap, "1.5_windows", True
yield wrap, "1.5_windows,x64", True
yield wrap, "Mountain Lion", False
yield wrap, "", False
for char in "`~!@#$%^&()+=/|\\<>":
yield wrap, char * 3, False
def set_permissions(self):
"""Fill out the permissions node with every possible permission."""
self.data["permissions"] = {}
for perm in appvalidator.constants.ALL_PERMISSIONS:
self.data["permissions"][perm] = {
"description": "Required to make things good."
}
if perm in WebappSpec.PERMISSIONS_ACCESS:
self.data["permissions"][perm]["access"] = (
WebappSpec.PERMISSIONS_ACCESS[perm][0])
def test_permissions_full(self):
self.set_permissions()
self.analyze()
self.assert_silent()
for perm in appvalidator.constants.ALL_PERMISSIONS:
self.assert_has_permission(perm)
def test_permissions_extra_invalid(self):
self.set_permissions()
self.data["permissions"]["foo"] = {"description": "lol"}
self.analyze()
self.assert_failed(with_errors=True)
assert 'foo' not in self.err.get_resource("permissions")
for perm in appvalidator.constants.ALL_PERMISSIONS:
self.assert_has_permission(perm)
def test_permissions_missing_desc(self):
self.set_permissions()
self.data["permissions"]["alarm"] = {}
self.analyze()
self.assert_failed(with_errors=True)
def test_permissions_missing_access(self):
self.set_permissions()
del self.data["permissions"]["contacts"]["access"]
self.analyze()
self.assert_failed(with_errors=True)
def test_permissions_invalid_access(self):
self.set_permissions()
self.data["permissions"]["contacts"]["access"] = "asdf"
self.analyze()
self.assert_failed(with_errors=True)
def test_permissions_wrong_access(self):
self.set_permissions()
# This access type isn't available for the `settings` permission.
self.data["permissions"]["settings"]["access"] = "createonly"
self.analyze()
self.assert_failed(with_errors=True)
def test_permissions_mobileid(self):
self.set_permissions()
self.data["permissions"]["mobileid"] = {"description": "cause"}
self.analyze()
self.assert_silent()
def test_csp(self):
self.data['csp'] = 'this is the csp policy. it can be a string.'
self.analyze()
self.assert_silent()
def test_description_long(self):
self.data['description'] = 'x' * 1025
self.analyze()
self.assert_failed(with_errors=True)
def test_locale_description_long(self):
self.data['locales']['es']['description'] = u'×' * 1025
self.analyze()
self.assert_failed(with_errors=True)
assert 'locales > es > description' in (
self.err.errors[0]['description'][-1])
def test_appcache_path_packaged(self):
self.data["appcache_path"] = '/foo.bar'
self.analyze()
self.assert_silent()
self.resources.append(("packaged", True))
self.analyze()
self.assert_failed(with_errors=True)
def test_messages_not_list(self):
self.data['messages'] = "foo"
self.analyze()
self.assert_failed(with_errors=True)
def test_messages_obj_not_obj(self):
self.data['messages'] = ["foo"]
self.analyze()
self.assert_failed(with_errors=True)
def test_messages_multiple_keys(self):
self.data['messages'] = [{"a": "1", "b": "2"}]
self.analyze()
self.assert_failed(with_errors=True)
def test_messages_pass(self):
self.data['messages'] = [{"key": "val"}, {"key": "val"}]
self.analyze()
self.assert_silent()
def test_redirects_pass(self):
self.data['redirects'] = [
{"to": "asdf", "from": "qwer"},
{"to": "asdf", "from": "qwer"},
]
self.analyze()
self.assert_silent()
def test_redirects_type(self):
self.data['redirects'] = 'asdf'
self.analyze()
self.assert_failed(with_errors=True)
def test_redirects_subtype(self):
self.data['redirects'] = [
'asdf',
{"to": "asdf", "from": "qwer"},
]
self.analyze()
self.assert_failed(with_errors=True)
def test_redirects_required_nodes(self):
self.data['redirects'] = [
{"bar": "asdf", "foo": "qwer"},
{"to": "asdf", "from": "qwer"},
]
self.analyze()
self.assert_failed(with_errors=True)
def test_redirects_missing_nodes(self):
self.data['redirects'] = [
{"to": "asdf"},
{"to": "asdf", "from": "qwer"},
]
self.analyze()
self.assert_failed(with_errors=True)
def test_origin_unprivileged(self):
self.data['origin'] = 'app://domain.com'
self.analyze()
self.assert_failed(with_errors=True)
def test_origin_must_be_lowercase(self):
self.make_privileged()
self.data['origin'] = 'app://DOMAIN.com'
self.analyze()
self.assert_failed(with_errors=True)
def test_arbitrary_origin(self):
self.make_privileged()
self.data['origin'] = 'app://just-some-identifier-string'
self.analyze()
self.assert_silent()
def test_uuid_origin(self):
self.make_privileged()
self.data['origin'] = 'app://878a1076-130e-46fc-a73f-634394166d14'
self.analyze()
self.assert_silent()
def test_origin_pass(self):
self.make_privileged()
self.data['origin'] = 'app://domain.com'
self.analyze()
self.assert_silent()
def test_origin_dashes(self):
self.make_privileged()
self.data["origin"] = "app://my-domain.com"
self.analyze()
self.assert_silent()
def test_origin_subdomains(self):
self.make_privileged()
self.data["origin"] = "app://sub.domain.com"
self.analyze()
self.assert_silent()
def test_origin_non_fqdn(self):
self.make_privileged()
self.data["origin"] = "app://hello"
self.analyze()
self.assert_silent()
def test_origin_type(self):
self.make_privileged()
self.data["origin"] = 123
self.analyze()
self.assert_failed(with_errors=True)
def test_origin_cannot_have_spaces(self):
self.make_privileged()
self.data["origin"] = "app://origin with spaces"
self.analyze()
self.assert_failed(with_errors=True)
def test_origin_must_be_web_safe(self):
self.make_privileged()
self.data["origin"] = "app://control\tchars\ndisallowed"
self.analyze()
self.assert_failed(with_errors=True)
def test_origin_must_have_app_protocol(self):
self.make_privileged()
self.data["origin"] = "random-identifier-without-protocol"
self.analyze()
self.assert_failed(with_errors=True)
def test_origin_cannot_contain_path(self):
self.make_privileged()
self.data["origin"] = "app://domain.com/path"
self.analyze()
self.assert_failed(with_errors=True)
def test_origin_cannot_end_in_trailing_slash(self):
self.make_privileged()
self.data["origin"] = "app://domain.com/"
self.analyze()
self.assert_failed(with_errors=True)
def test_origin_allowed(self):
for origin in ("app://marketplace.firefox.com",
"app://gamescentre.mozilla.com",
"app://mozilla.org",
"app://system.gaiamobile.org"):
self.make_privileged()
self.data["origin"] = origin
self.analyze()
self.assert_silent()
@patch("appvalidator.specs.webapps.BANNED_ORIGINS", [
"gamescentre.mozilla.com",
"firefox.com",
])
def test_origin_banned(self):
self.make_privileged()
for origin in ("app://gamescentre.mozilla.com",
"app://theconsoleisdead.firefox.com"):
self.data["origin"] = origin
self.analyze()
self.assert_failed(with_errors=True)
def test_chrome(self):
self.data["chrome"] = {"navigation": True}
self.analyze()
self.assert_silent()
def test_chrome_alt(self):
self.data["chrome"] = {"navigation": False}
self.analyze()
self.assert_silent()
def test_chrome_bad_navigation(self):
self.data["chrome"] = {"navigation": 123}
self.analyze()
self.assert_failed(with_errors=True)
def test_chrome_bad_keys(self):
self.data["chrome"] = {"haldo": 123}
self.analyze()
self.assert_failed(with_errors=True)
def test_chrome_bad_type(self):
self.data["chrome"] = []
self.analyze()
self.assert_failed(with_errors=True)
def test_precompile_wrong_format(self):
# "precompile" should be list of files not this weird dict.
self.data["precompile"] = {"foo.js": True}
self.analyze()
self.assert_failed(with_errors=True)
def test_precompile_feature(self):
self.analyze()
self.assert_silent()
self.assert_has_feature('PRECOMPILE_ASMJS')
| mozilla/app-validator | tests/test_webapp.py | Python | bsd-3-clause | 55,619 | ["exciting"] | 7d2070c4d405e7a6766aef617491988fe6f5dad5661939f00a5e958a53924910 |
# Copyright (C) 2010, Jesper Friis
# (see accompanying license files for details).
"""Definition of the Spacegroup class.
This module only depends on NumPy and the space group database.
"""
import os
import warnings
import numpy as np
__all__ = ['Spacegroup']
class SpacegroupError(Exception):
"""Base exception for the spacegroup module."""
pass
class SpacegroupNotFoundError(SpacegroupError):
"""Raised when given space group cannot be found in data base."""
pass
class SpacegroupValueError(SpacegroupError):
"""Raised when arguments have invalid value."""
pass
class Spacegroup(object):
"""A space group class.
Instances of Spacegroup describe the symmetry operations for the
given space group.
Example:
>>> from ase.lattice.spacegroup import Spacegroup
>>>
>>> sg = Spacegroup(225)
>>> print 'Space group', sg.no, sg.symbol
Space group 225 F m -3 m
>>> sg.scaled_primitive_cell
array([[ 0. , 0.5, 0.5],
[ 0.5, 0. , 0.5],
[ 0.5, 0.5, 0. ]])
>>> sites, kinds = sg.equivalent_sites([[0,0,0]])
>>> sites
array([[ 0. , 0. , 0. ],
[ 0. , 0.5, 0.5],
[ 0.5, 0. , 0.5],
[ 0.5, 0.5, 0. ]])
"""
no = property(
lambda self: self._no,
doc='Space group number in International Tables of Crystallography.')
symbol = property(
lambda self: self._symbol,
doc='Hermann-Mauguin (or international) symbol for the space group.')
setting = property(
lambda self: self._setting,
doc='Space group setting. Either one or two.')
lattice = property(
lambda self: self._symbol[0],
doc="""Lattice type:
P primitive
I body centering, h+k+l=2n
F face centering, h,k,l all odd or even
A,B,C single face centering, k+l=2n, h+l=2n, h+k=2n
R rhombohedral centering, -h+k+l=3n (obverse); h-k+l=3n (reverse)
""")
centrosymmetric = property(
lambda self: self._centrosymmetric,
doc='Whether a center of symmetry exists.')
scaled_primitive_cell = property(
lambda self: self._scaled_primitive_cell,
doc='Primitive cell in scaled coordinates as a matrix with the '
'primitive vectors along the rows.')
reciprocal_cell = property(
lambda self: self._reciprocal_cell,
doc='Three Miller indices that span all kinematically non-forbidden '
'reflections as a matrix with the Miller indices along the rows.')
nsubtrans = property(
lambda self: len(self._subtrans),
doc='Number of cell-subtranslation vectors.')
def _get_nsymop(self):
"""Returns total number of symmetry operations."""
if self.centrosymmetric:
return 2 * len(self._rotations) * len(self._subtrans)
else:
return len(self._rotations) * len(self._subtrans)
nsymop = property(_get_nsymop, doc='Total number of symmetry operations.')
subtrans = property(
lambda self: self._subtrans,
doc='Translations vectors belonging to cell-sub-translations.')
rotations = property(
lambda self: self._rotations,
doc='Symmetry rotation matrices. The inversions are not included '
'for centrosymmetric crystals.')
translations = property(
lambda self: self._translations,
doc='Symmetry translations. The inversions are not included '
'for centrosymmetric crystals.')
def __init__(self, spacegroup, setting=1, datafile=None):
"""Returns a new Spacegroup instance.
Parameters:
spacegroup : int | string | Spacegroup instance
The space group number in International Tables of
Crystallography or its Hermann-Mauguin symbol. E.g.
spacegroup=225 and spacegroup='F m -3 m' are equivalent.
setting : 1 | 2
Some space groups have more than one setting. `setting`
determines which of these should be used.
datafile : None | string
Path to database file. If `None`, the default database
will be used.
"""
if isinstance(spacegroup, Spacegroup):
for k, v in spacegroup.__dict__.iteritems():
setattr(self, k, v)
return
if not datafile:
datafile = get_datafile()
f = open(datafile, 'r')
try:
_read_datafile(self, spacegroup, setting, f)
finally:
f.close()
def __repr__(self):
return 'Spacegroup(%d, setting=%d)' % (self.no, self.setting)
def __str__(self):
"""Return a string representation of the space group data in
the same format as found in the database."""
retval = []
# no, symbol
retval.append('%-3d %s\n' % (self.no, self.symbol))
# setting
retval.append(' setting %d\n' % (self.setting))
# centrosymmetric
retval.append(' centrosymmetric %d\n' % (self.centrosymmetric))
# primitive vectors
retval.append(' primitive vectors\n')
for i in range(3):
retval.append(' ')
for j in range(3):
retval.append(' %13.10f' % (self.scaled_primitive_cell[i, j]))
retval.append('\n')
# primitive reciprocal vectors
retval.append(' reciprocal vectors\n')
for i in range(3):
retval.append(' ')
for j in range(3):
retval.append(' %3d' % (self.reciprocal_cell[i, j]))
retval.append('\n')
# sublattice
retval.append(' %d subtranslations\n' % self.nsubtrans)
for i in range(self.nsubtrans):
retval.append(' ')
for j in range(3):
retval.append(' %13.10f' % (self.subtrans[i, j]))
retval.append('\n')
# symmetry operations
nrot = len(self.rotations)
retval.append(' %d symmetry operations (rot+trans)\n' % nrot)
for i in range(nrot):
retval.append(' ')
for j in range(3):
retval.append(' ')
for k in range(3):
retval.append(' %2d' % (self.rotations[i, j, k]))
retval.append(' ')
for j in range(3):
retval.append(' %13.10f' % self.translations[i, j])
retval.append('\n')
retval.append('\n')
return ''.join(retval)
def __eq__(self, other):
"""Chech whether *self* and *other* refer to the same
spacegroup number and setting."""
if not isinstance(other, Spacegroup):
other = Spacegroup(other)
return self.no == other.no and self.setting == other.setting
def __index__(self):
return self.no
def get_symop(self):
"""Returns all symmetry operations (including inversions and
subtranslations) as a sequence of (rotation, translation)
tuples."""
symop = []
parities = [1]
if self.centrosymmetric:
parities.append(-1)
for parity in parities:
for subtrans in self.subtrans:
for rot, trans in zip(self.rotations, self.translations):
newtrans = np.mod(trans + subtrans, 1)
symop.append((parity*rot, newtrans))
return symop
def get_op(self):
"""Returns all symmetry operations (including inversions and
subtranslations), but unlike get_symop(), they are returned as
two ndarrays."""
if self.centrosymmetric:
rot = np.tile(np.vstack((self.rotations, -self.rotations)),
(self.nsubtrans, 1, 1))
trans = np.repeat(self.subtrans, 2*len(self.rotations), axis=0)
else:
rot = np.tile(self.rotations, (self.nsubtrans, 1, 1))
trans = np.repeat(self.subtrans, len(self.rotations), axis=0)
return rot, trans
def get_rotations(self):
"""Return all rotations, including inversions for
centrosymmetric crystals."""
if self.centrosymmetric:
return np.vstack((self.rotations, -self.rotations))
else:
return self.rotations
def equivalent_reflections(self, hkl):
"""Return all equivalent reflections to the list of Miller indices
in hkl.
Example:
>>> from ase.lattice.spacegroup import Spacegroup
>>> sg = Spacegroup(225) # fcc
>>> sg.equivalent_reflections([[0, 0, 2]])
array([[ 0, 0, -2],
[ 0, -2, 0],
[-2, 0, 0],
[ 2, 0, 0],
[ 0, 2, 0],
[ 0, 0, 2]])
"""
hkl = np.array(hkl, dtype='int', ndmin=2)
rot = self.get_rotations()
n, nrot = len(hkl), len(rot)
R = rot.transpose(0, 2, 1).reshape((3*nrot, 3)).T
refl = np.dot(hkl, R).reshape((n*nrot, 3))
ind = np.lexsort(refl.T)
refl = refl[ind]
diff = np.diff(refl, axis=0)
mask = np.any(diff, axis=1)
return np.vstack((refl[mask], refl[-1,:]))
def symmetry_normalised_reflections(self, hkl):
"""Returns an array of same size as *hkl*, containing the
corresponding symmetry-equivalent reflections of lowest
indices.
Example:
>>> from ase.lattice.spacegroup import Spacegroup
>>> sg = Spacegroup(225) # fcc
>>> sg.symmetry_normalised_reflections([[2, 0, 0], [0, 2, 0]])
array([[ 0, 0, -2],
[ 0, 0, -2]])
"""
hkl = np.array(hkl, dtype=int, ndmin=2)
normalised = np.empty(hkl.shape, int)
R = self.get_rotations().transpose(0, 2, 1)
for i, g in enumerate(hkl):
gsym = np.dot(R, g)
j = np.lexsort(gsym.T)[0]
normalised[i,:] = gsym[j]
return normalised
def unique_reflections(self, hkl):
"""Returns a subset *hkl* containing only the symmetry-unique
reflections.
Example:
>>> from ase.lattice.spacegroup import Spacegroup
>>> sg = Spacegroup(225) # fcc
>>> sg.unique_reflections([[ 2, 0, 0],
... [ 0, -2, 0],
... [ 2, 2, 0],
... [ 0, -2, -2]])
array([[2, 0, 0],
[2, 2, 0]])
"""
hkl = np.array(hkl, dtype=int, ndmin=2)
hklnorm = self.symmetry_normalised_reflections(hkl)
perm = np.lexsort(hklnorm.T)
iperm = perm.argsort()
xmask = np.abs(np.diff(hklnorm[perm], axis=0)).any(axis=1)
mask = np.concatenate(([True], xmask))
imask = mask[iperm]
return hkl[imask]
def equivalent_sites(self, scaled_positions, ondublicates='error',
symprec=1e-3):
"""Returns the scaled positions and all their equivalent sites.
Parameters:
scaled_positions: list | array
List of non-equivalent sites given in unit cell coordinates.
ondublicates : 'keep' | 'replace' | 'warn' | 'error'
Action if `scaled_positions` contain symmetry-equivalent
positions:
'keep'
ignore additional symmetry-equivalent positions
'replace'
replace
'warn'
like 'keep', but issue an UserWarning
'error'
raises a SpacegroupValueError
symprec: float
Minimum "distance" betweed two sites in scaled coordinates
before they are counted as the same site.
Returns:
sites: array
A NumPy array of equivalent sites.
kinds: list
A list of integer indices specifying which input site is
equivalent to the corresponding returned site.
Example:
>>> from ase.lattice.spacegroup import Spacegroup
>>> sg = Spacegroup(225) # fcc
>>> sites, kinds = sg.equivalent_sites([[0, 0, 0], [0.5, 0.0, 0.0]])
>>> sites
array([[ 0. , 0. , 0. ],
[ 0. , 0.5, 0.5],
[ 0.5, 0. , 0.5],
[ 0.5, 0.5, 0. ],
[ 0.5, 0. , 0. ],
[ 0. , 0.5, 0. ],
[ 0. , 0. , 0.5],
[ 0.5, 0.5, 0.5]])
>>> kinds
[0, 0, 0, 0, 1, 1, 1, 1]
"""
kinds = []
sites = []
symprec2 = symprec**2
scaled = np.array(scaled_positions, ndmin=2)
for kind, pos in enumerate(scaled):
for rot, trans in self.get_symop():
site = np.mod(np.dot(rot, pos) + trans, 1.)
if not sites:
sites.append(site)
kinds.append(kind)
continue
t = site - sites
mask = np.sum(t*t, 1) < symprec2
if np.any(mask):
ind = np.argwhere(mask)[0][0]
if kinds[ind] == kind:
pass
elif ondublicates == 'keep':
pass
elif ondublicates == 'replace':
kinds[ind] = kind
elif ondublicates == 'warn':
warnings.warn('scaled_positions %d and %d '
'are equivalent'%(kinds[ind], kind))
elif ondublicates == 'error':
raise SpacegroupValueError(
'scaled_positions %d and %d are equivalent'%(
kinds[ind], kind))
else:
raise SpacegroupValueError(
'Argument "ondublicates" must be one of: '
'"keep", "replace", "warn" or "error".')
else:
sites.append(site)
kinds.append(kind)
return np.array(sites), kinds
def symmetry_normalised_sites(self, scaled_positions):
"""Returns an array of same size as *scaled_positions*,
containing the corresponding symmetry-equivalent sites within
the unit cell of lowest indices.
Example:
>>> from ase.lattice.spacegroup import Spacegroup
>>> sg = Spacegroup(225) # fcc
>>> sg.symmetry_normalised_sites([[0.0, 0.5, 0.5], [1.0, 1.0, 0.0]])
array([[ 0., 0., 0.],
[ 0., 0., 0.]])
"""
scaled = np.array(scaled_positions, ndmin=2)
normalised = np.empty(scaled.shape, np.float)
rot, trans = self.get_op()
for i, pos in enumerate(scaled):
sympos = np.dot(rot, pos) + trans
# Must be done twice, see the scaled_positions.py test
sympos %= 1.0
sympos %= 1.0
j = np.lexsort(sympos.T)[0]
normalised[i,:] = sympos[j]
return normalised
def unique_sites(self, scaled_positions, symprec=1e-3, output_mask=False):
"""Returns a subset of *scaled_positions* containing only the
symmetry-unique positions. If *output_mask* is True, a boolean
array masking the subset is also returned.
Example:
>>> from ase.lattice.spacegroup import Spacegroup
>>> sg = Spacegroup(225) # fcc
>>> sg.unique_sites([[0.0, 0.0, 0.0],
... [0.5, 0.5, 0.0],
... [1.0, 0.0, 0.0],
... [0.5, 0.0, 0.0]])
array([[ 0. , 0. , 0. ],
[ 0.5, 0. , 0. ]])
"""
scaled = np.array(scaled_positions, ndmin=2)
symnorm = self.symmetry_normalised_sites(scaled)
perm = np.lexsort(symnorm.T)
iperm = perm.argsort()
xmask = np.abs(np.diff(symnorm[perm], axis=0)).max(axis=1) > symprec
mask = np.concatenate(([True], xmask))
imask = mask[iperm]
if output_mask:
return scaled[imask], imask
else:
return scaled[imask]
def tag_sites(self, scaled_positions, symprec=1e-3):
"""Returns an integer array of the same length as *scaled_positions*,
tagging all equivalent atoms with the same index.
Example:
>>> from ase.lattice.spacegroup import Spacegroup
>>> sg = Spacegroup(225) # fcc
>>> sg.tag_sites([[0.0, 0.0, 0.0],
... [0.5, 0.5, 0.0],
... [1.0, 0.0, 0.0],
... [0.5, 0.0, 0.0]])
array([0, 0, 0, 1])
"""
scaled = np.array(scaled_positions, ndmin=2)
scaled %= 1.0
scaled %= 1.0
tags = -np.ones((len(scaled), ), dtype=int)
mask = np.ones((len(scaled), ), dtype=np.bool)
rot, trans = self.get_op()
i = 0
while mask.any():
pos = scaled[mask][0]
sympos = np.dot(rot, pos) + trans
# Must be done twice, see the scaled_positions.py test
sympos %= 1.0
sympos %= 1.0
m = ~np.all(np.any(np.abs(scaled[np.newaxis,:,:] -
sympos[:,np.newaxis,:]) > symprec,
axis=2), axis=0)
assert not np.any((~mask) & m)
tags[m] = i
mask &= ~m
i += 1
return tags
def get_datafile():
"""Return default path to datafile."""
return os.path.join(os.path.dirname(__file__), 'spacegroup.dat')
def format_symbol(symbol):
"""Returns well formatted Hermann-Mauguin symbol as extected by
the database, by correcting the case and adding missing or
removing dublicated spaces."""
fixed = []
s = symbol.strip()
s = s[0].upper() + s[1:].lower()
for c in s:
if c.isalpha():
fixed.append(' ' + c + ' ')
elif c.isspace():
fixed.append(' ')
elif c.isdigit():
fixed.append(c)
elif c == '-':
fixed.append(' ' + c)
elif c == '/':
fixed.append(' ' + c)
s = ''.join(fixed).strip()
return ' '.join(s.split())
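# Illustrative usage of format_symbol() (a note added here, not part of the
# original module): the spelling of a Hermann-Mauguin symbol is normalised,
# e.g.
#
#   >>> format_symbol('fm-3m')
#   'F m -3 m'
#
# which matches the database spelling shown by Spacegroup(225) above.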
#-----------------------------------------------------------------
# Functions for parsing the database. They are moved outside the
# Spacegroup class in order to make it easier to later implement
# caching to avoid reading the database each time a new Spacegroup
# instance is created.
#-----------------------------------------------------------------
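# A minimal sketch (illustrative only, not part of the original module) of
# the caching idea mentioned above: a module-level dict keyed by the lookup
# parameters avoids re-reading the database for repeated lookups. The helper
# name `_cached_spacegroup` is hypothetical and is not used elsewhere here.
_spacegroup_cache = {}

def _cached_spacegroup(spacegroup, setting=1, datafile=None):
    """Return a cached Spacegroup instance, creating it on first use."""
    key = (spacegroup, setting, datafile)
    if key not in _spacegroup_cache:
        _spacegroup_cache[key] = Spacegroup(spacegroup, setting, datafile)
    return _spacegroup_cache[key]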
def _skip_to_blank(f, spacegroup, setting):
"""Read lines from f until a blank line is encountered."""
while True:
line = f.readline()
if not line:
raise SpacegroupNotFoundError(
'invalid spacegroup %s, setting %i not found in data base' %
( spacegroup, setting ) )
if not line.strip():
break
def _skip_to_nonblank(f, spacegroup, setting):
"""Read lines from f until a nonblank line not starting with a
hash (#) is encountered and returns this and the next line."""
while True:
line1 = f.readline()
if not line1:
raise SpacegroupNotFoundError(
'invalid spacegroup %s, setting %i not found in data base' %
( spacegroup, setting ) )
line1 = line1.strip()
if line1 and not line1.startswith('#'):
line2 = f.readline()
break
return line1, line2
def _read_datafile_entry(spg, no, symbol, setting, f):
"""Read space group data from f to spg."""
spg._no = no
spg._symbol = symbol.strip()
spg._setting = setting
spg._centrosymmetric = bool(int(f.readline().split()[1]))
# primitive vectors
f.readline()
spg._scaled_primitive_cell = np.array([map(float, f.readline().split())
for i in range(3)],
dtype=np.float)
# primitive reciprocal vectors
f.readline()
spg._reciprocal_cell = np.array([map(int, f.readline().split())
for i in range(3)],
dtype=np.int)
# subtranslations
spg._nsubtrans = int(f.readline().split()[0])
spg._subtrans = np.array([map(float, f.readline().split())
for i in range(spg._nsubtrans)],
dtype=np.float)
# symmetry operations
nsym = int(f.readline().split()[0])
symop = np.array([map(float, f.readline().split()) for i in range(nsym)],
dtype=np.float)
spg._nsymop = nsym
spg._rotations = np.array(symop[:,:9].reshape((nsym,3,3)), dtype=np.int)
spg._translations = symop[:,9:]
def _read_datafile(spg, spacegroup, setting, f):
if isinstance(spacegroup, int):
pass
elif isinstance(spacegroup, basestring):
#spacegroup = ' '.join(spacegroup.strip().split())
spacegroup = format_symbol(spacegroup)
else:
raise SpacegroupValueError('`spacegroup` must be of type int or str')
while True:
line1, line2 = _skip_to_nonblank(f, spacegroup, setting)
_no,_symbol = line1.strip().split(None, 1)
_symbol = format_symbol(_symbol)
_setting = int(line2.strip().split()[1])
_no = int(_no)
if ((isinstance(spacegroup, int) and _no == spacegroup) or
(isinstance(spacegroup, basestring) and
_symbol == spacegroup)) and _setting == setting:
_read_datafile_entry(spg, _no, _symbol, _setting, f)
break
else:
_skip_to_blank(f, spacegroup, setting)
def parse_sitesym(symlist, sep=','):
"""Parses a sequence of site symmetries in the form used by
International Tables and returns corresponding rotation and
translation arrays.
Example:
>>> symlist = [
... 'x,y,z',
... '-y+1/2,x+1/2,z',
... '-y,-x,-z',
... ]
>>> rot, trans = parse_sitesym(symlist)
>>> rot
array([[[ 1, 0, 0],
[ 0, 1, 0],
[ 0, 0, 1]],
<BLANKLINE>
[[ 0, -1, 0],
[ 1, 0, 0],
[ 0, 0, 1]],
<BLANKLINE>
[[ 0, -1, 0],
[-1, 0, 0],
[ 0, 0, -1]]])
>>> trans
array([[ 0. , 0. , 0. ],
[ 0.5, 0.5, 0. ],
[ 0. , 0. , 0. ]])
"""
nsym = len(symlist)
rot = np.zeros((nsym, 3, 3), dtype='int')
trans = np.zeros((nsym, 3))
for i, sym in enumerate(symlist):
for j, s in enumerate (sym.split(sep)):
s = s.lower().strip()
while s:
sign = 1
if s[0] in '+-':
if s[0] == '-':
sign = -1
s = s[1:]
if s[0] in 'xyz':
k = ord(s[0]) - ord('x')
rot[i, j, k] = sign
s = s[1:]
elif s[0].isdigit() or s[0] == '.':
n = 0
while n < len(s) and (s[n].isdigit() or s[n] in '/.'):
n += 1
t = s[:n]
s = s[n:]
if '/' in t:
q, r = t.split('/')
trans[i,j] = float(q)/float(r)
else:
trans[i,j] = float(t)
else:
raise SpacegroupValueError(
'Error parsing %r. Invalid site symmetry: %s' %
(s, sym))
return rot, trans
def spacegroup_from_data(no=None, symbol=None, setting=1,
centrosymmetric=None, scaled_primitive_cell=None,
reciprocal_cell=None, subtrans=None, sitesym=None,
rotations=None, translations=None, datafile=None):
"""Manually create a new space group instance. This might be
useful when reading crystal data with its own spacegroup
definitions."""
if no is not None:
spg = Spacegroup(no, setting, datafile)
elif symbol is not None:
spg = Spacegroup(symbol, setting, datafile)
else:
raise SpacegroupValueError('either *no* or *symbol* must be given')
have_sym = False
if centrosymmetric is not None:
spg._centrosymmetric = bool(centrosymmetric)
if scaled_primitive_cell is not None:
spg._scaled_primitive_cell = np.array(scaled_primitive_cell)
if reciprocal_cell is not None:
spg._reciprocal_cell = np.array(reciprocal_cell)
if subtrans is not None:
spg._subtrans = np.atleast_2d(subtrans)
spg._nsubtrans = spg._subtrans.shape[0]
if sitesym is not None:
spg._rotations, spg._translations = parse_sitesym(sitesym)
have_sym = True
if rotations is not None:
spg._rotations = np.atleast_3d(rotations)
have_sym = True
if translations is not None:
spg._translations = np.atleast_2d(translations)
have_sym = True
if have_sym:
if spg._rotations.shape[0] != spg._translations.shape[0]:
raise SpacegroupValueError('inconsistent number of rotations and '
'translations')
spg._nsymop = spg._rotations.shape[0]
return spg
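# Illustrative example (a sketch, not from the original source): building a
# Spacegroup from explicit symmetry operators, e.g. as read from a CIF file.
# The operator list below is only a minimal placeholder.
#
#   sg = spacegroup_from_data(no=1, sitesym=['x,y,z'])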
#-----------------------------------------------------------------
# Self test
if __name__ == '__main__':
# Import spacegroup in order to ensure that __file__ is defined
# such that the data base can be found.
import spacegroup
import doctest
print 'doctest: ', doctest.testmod()
| JConwayAWT/PGSS14CC | lib/python/multimetallics/ase/lattice/spacegroup/spacegroup.py | Python | gpl-2.0 | 25,861 | ["ASE", "CRYSTAL"] | 68dc83fa27ab74c42aa105cc22f572d655ee798a924632bf2f0738f2f49ea0ba |
|
#!/usr/bin/env python
'''
setup board.h for chibios
'''
import argparse, sys, fnmatch, os, dma_resolver, shlex, pickle, re
import shutil
parser = argparse.ArgumentParser("chibios_pins.py")
parser.add_argument(
'-D', '--outdir', type=str, default=None, help='Output directory')
parser.add_argument(
'--bootloader', action='store_true', default=False, help='configure for bootloader')
parser.add_argument(
'hwdef', type=str, default=None, help='hardware definition file')
args = parser.parse_args()
# output variables for each pin
f4f7_vtypes = ['MODER', 'OTYPER', 'OSPEEDR', 'PUPDR', 'ODR', 'AFRL', 'AFRH']
f1_vtypes = ['CRL', 'CRH', 'ODR']
f1_input_sigs = ['RX', 'MISO', 'CTS']
f1_output_sigs = ['TX', 'MOSI', 'SCK', 'RTS', 'CH1', 'CH2', 'CH3', 'CH4']
af_labels = ['USART', 'UART', 'SPI', 'I2C', 'SDIO', 'SDMMC', 'OTG', 'JT', 'TIM', 'CAN']
vtypes = []
# number of pins in each port
pincount = {
'A': 16,
'B': 16,
'C': 16,
'D': 16,
'E': 16,
'F': 16,
'G': 16,
'H': 2,
'I': 0,
'J': 0,
'K': 0
}
ports = pincount.keys()
portmap = {}
# dictionary of all config lines, indexed by first word
config = {}
# list of all pins in config file order
allpins = []
# list of configs by type
bytype = {}
# list of configs by label
bylabel = {}
# list of SPI devices
spidev = []
# list of ROMFS files
romfs = []
# SPI bus list
spi_list = []
# all config lines in order
alllines = []
# allow for extra env vars
env_vars = {}
# build flags for ChibiOS makefiles
build_flags = []
mcu_type = None
def is_int(str):
'''check if a string is an integer'''
try:
int(str)
except Exception:
return False
return True
def error(str):
'''show an error and exit'''
print("Error: " + str)
sys.exit(1)
def get_mcu_lib(mcu):
'''get library file for the chosen MCU'''
import importlib
try:
return importlib.import_module(mcu)
except ImportError:
error("Unable to find module for MCU %s" % mcu)
def setup_mcu_type_defaults():
'''setup defaults for given mcu type'''
global pincount, ports, portmap, vtypes
lib = get_mcu_lib(mcu_type)
if hasattr(lib, 'pincount'):
pincount = lib.pincount
if mcu_series == "STM32F100":
vtypes = f1_vtypes
else:
vtypes = f4f7_vtypes
ports = pincount.keys()
# setup default as input pins
for port in ports:
portmap[port] = []
for pin in range(pincount[port]):
portmap[port].append(generic_pin(port, pin, None, 'INPUT', []))
def get_alt_function(mcu, pin, function):
'''return alternative function number for a pin'''
lib = get_mcu_lib(mcu)
if hasattr(lib, "AltFunction_map"):
alt_map = lib.AltFunction_map
else:
# just check if Alt Func is available or not
for l in af_labels:
if function.startswith(l):
return 0
return None
if function and function.endswith("_RTS") and (
function.startswith('USART') or function.startswith('UART')):
# we do software RTS
return None
for l in af_labels:
if function.startswith(l):
s = pin + ":" + function
if not s in alt_map:
error("Unknown pin function %s for MCU %s" % (s, mcu))
return alt_map[s]
return None
def have_type_prefix(ptype):
'''return True if we have a peripheral starting with the given peripheral type'''
for t in bytype.keys():
if t.startswith(ptype):
return True
return False
def get_ADC1_chan(mcu, pin):
'''return ADC1 channel for an analog pin'''
import importlib
try:
lib = importlib.import_module(mcu)
ADC1_map = lib.ADC1_map
except ImportError:
error("Unable to find ADC1_Map for MCU %s" % mcu)
if not pin in ADC1_map:
error("Unable to find ADC1 channel for pin %s" % pin)
return ADC1_map[pin]
class generic_pin(object):
'''class to hold pin definition'''
def __init__(self, port, pin, label, type, extra):
global mcu_series
self.portpin = "P%s%u" % (port, pin)
self.port = port
self.pin = pin
self.label = label
self.type = type
self.extra = extra
self.af = None
if type == 'OUTPUT':
self.sig_dir = 'OUTPUT'
else:
self.sig_dir = 'INPUT'
if mcu_series == "STM32F100" and self.label is not None:
self.f1_pin_setup()
def f1_pin_setup(self):
for l in af_labels:
if self.label.startswith(l):
if self.label.endswith(tuple(f1_input_sigs)):
self.sig_dir = 'INPUT'
self.extra.append('FLOATING')
elif self.label.endswith(tuple(f1_output_sigs)):
self.sig_dir = 'OUTPUT'
else:
error("Unknown signal type %s:%s for %s!" % (self.portpin, self.label, mcu_type))
def has_extra(self, v):
'''return true if we have the given extra token'''
return v in self.extra
def extra_prefix(self, prefix):
'''find an extra token starting with the given prefix'''
for e in self.extra:
if e.startswith(prefix):
return e
return None
def extra_value(self, name, type=None, default=None):
'''find an extra value of given type'''
v = self.extra_prefix(name)
if v is None:
return default
if v[len(name)] != '(' or v[-1] != ')':
error("Badly formed value for %s: %s\n" % (name, v))
ret = v[len(name) + 1:-1]
if type is not None:
try:
ret = type(ret)
except Exception:
error("Badly formed value for %s: %s\n" % (name, ret))
return ret
def is_RTS(self):
'''return true if this is a RTS pin'''
if self.label and self.label.endswith("_RTS") and (
self.type.startswith('USART') or self.type.startswith('UART')):
return True
return False
def is_CS(self):
'''return true if this is a CS pin'''
return self.has_extra("CS") or self.type == "CS"
def get_MODER(self):
'''return one of ALTERNATE, OUTPUT, ANALOG, INPUT'''
if self.af is not None:
v = "ALTERNATE"
elif self.type == 'OUTPUT':
v = "OUTPUT"
elif self.type.startswith('ADC'):
v = "ANALOG"
elif self.is_CS():
v = "OUTPUT"
elif self.is_RTS():
v = "OUTPUT"
else:
v = "INPUT"
return "PIN_MODE_%s(%uU)" % (v, self.pin)
def get_OTYPER(self):
'''return one of PUSHPULL, OPENDRAIN'''
v = 'PUSHPULL'
if self.type.startswith('I2C'):
# default I2C to OPENDRAIN
v = 'OPENDRAIN'
values = ['PUSHPULL', 'OPENDRAIN']
for e in self.extra:
if e in values:
v = e
return "PIN_OTYPE_%s(%uU)" % (v, self.pin)
def get_OSPEEDR(self):
'''return one of SPEED_VERYLOW, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH'''
# on STM32F4 these speeds correspond to 2MHz, 25MHz, 50MHz and 100MHz
values = ['SPEED_VERYLOW', 'SPEED_LOW', 'SPEED_MEDIUM', 'SPEED_HIGH']
v = 'SPEED_MEDIUM'
for e in self.extra:
if e in values:
v = e
return "PIN_O%s(%uU)" % (v, self.pin)
def get_PUPDR(self):
'''return one of FLOATING, PULLUP, PULLDOWN'''
values = ['FLOATING', 'PULLUP', 'PULLDOWN']
v = 'FLOATING'
if self.is_CS():
v = "PULLUP"
if (self.type.startswith('USART') or
self.type.startswith('UART')) and (
(self.label.endswith('_TX') or
self.label.endswith('_RX') or
self.label.endswith('_CTS') or
self.label.endswith('_RTS'))):
v = "PULLUP"
for e in self.extra:
if e in values:
v = e
return "PIN_PUPDR_%s(%uU)" % (v, self.pin)
def get_ODR_F1(self):
'''return one of LOW, HIGH'''
values = ['LOW', 'HIGH']
v = 'HIGH'
if self.type == 'OUTPUT':
v = 'LOW'
for e in self.extra:
if e in values:
v = e
# for some controllers, input pull-up/pull-down is selected by ODR
if self.type == "INPUT":
v = 'LOW'
if 'PULLUP' in self.extra:
v = "HIGH"
return "PIN_ODR_%s(%uU)" % (v, self.pin)
def get_ODR(self):
'''return one of LOW, HIGH'''
if mcu_series == "STM32F100":
return self.get_ODR_F1()
values = ['LOW', 'HIGH']
v = 'HIGH'
for e in self.extra:
if e in values:
v = e
return "PIN_ODR_%s(%uU)" % (v, self.pin)
def get_AFIO(self):
'''return AFIO'''
af = self.af
if af is None:
af = 0
return "PIN_AFIO_AF(%uU, %uU)" % (self.pin, af)
def get_AFRL(self):
'''return AFIO low 8'''
if self.pin >= 8:
return None
return self.get_AFIO()
def get_AFRH(self):
'''return AFIO high 8'''
if self.pin < 8:
return None
return self.get_AFIO()
def get_CR(self):
'''return CR FLAGS'''
#Check Speed
if self.sig_dir != "INPUT":
speed_values = ['SPEED_LOW', 'SPEED_MEDIUM', 'SPEED_HIGH']
v = 'SPEED_MEDIUM'
for e in self.extra:
if e in speed_values:
v = e
speed_str = "PIN_%s(%uU) |" % (v, self.pin)
else:
speed_str = ""
#Check Alternate function
if self.type.startswith('I2C'):
v = "AF_OD"
elif self.sig_dir == 'OUTPUT':
if self.af is not None:
v = "AF_PP"
else:
v = "OUTPUT_PP"
elif self.type.startswith('ADC'):
v = "ANALOG"
elif self.is_CS():
v = "OUTPUT_PP"
elif self.is_RTS():
v = "OUTPUT_PP"
else:
v = "PUD"
if 'FLOATING' in self.extra:
v = "NOPULL"
mode_str = "PIN_MODE_%s(%uU)" % (v, self.pin)
return "%s %s" % (speed_str, mode_str)
def get_CRH(self):
if self.pin < 8:
return None
return self.get_CR()
def get_CRL(self):
if self.pin >= 8:
return None
return self.get_CR()
def __str__(self):
str = ''
if self.af is not None:
str += " AF%u" % self.af
if self.type.startswith('ADC1'):
str += " ADC1_IN%u" % get_ADC1_chan(mcu_type, self.portpin)
if self.extra_value('PWM', type=int):
str += " PWM%u" % self.extra_value('PWM', type=int)
return "P%s%u %s %s%s" % (self.port, self.pin, self.label, self.type,
str)
def get_config(name, column=0, required=True, default=None, type=None, spaces=False):
'''get a value from config dictionary'''
if not name in config:
if required and default is None:
error("missing required value %s in hwdef.dat" % name)
return default
if len(config[name]) < column + 1:
error("missing required value %s in hwdef.dat (column %u)" % (name,
column))
if spaces:
ret = ' '.join(config[name][column:])
else:
ret = config[name][column]
if type is not None:
if type == int and ret.startswith('0x'):
try:
ret = int(ret,16)
except Exception:
error("Badly formed config value %s (got %s)" % (name, ret))
else:
try:
ret = type(ret)
except Exception:
error("Badly formed config value %s (got %s)" % (name, ret))
return ret
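# Illustrative note (added here, not part of the original script): a
# hwdef.dat line such as
#
#   FLASH_SIZE_KB 2048
#
# is stored by the parsing code (later in this script) roughly as
# config['FLASH_SIZE_KB'] = ['2048'], so get_config('FLASH_SIZE_KB', type=int)
# returns 2048, and get_config('MCU', 1) returns the second word of the MCU line.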
def get_mcu_config(name, required=False):
'''get a value from the mcu dictionary'''
lib = get_mcu_lib(mcu_type)
if not hasattr(lib, 'mcu'):
error("Missing mcu config for %s" % mcu_type)
if not name in lib.mcu:
if required:
error("Missing required mcu config %s for %s" % (name, mcu_type))
return None
return lib.mcu[name]
def enable_can(f):
'''setup for a CAN enabled board'''
f.write('#define HAL_WITH_UAVCAN 1\n')
env_vars['HAL_WITH_UAVCAN'] = '1'
def has_sdcard_spi():
'''check for sdcard connected to spi bus'''
for dev in spidev:
if(dev[0] == 'sdcard'):
return True
return False
def write_mcu_config(f):
'''write MCU config defines'''
f.write('// MCU type (ChibiOS define)\n')
f.write('#define %s_MCUCONF\n' % get_config('MCU'))
f.write('#define %s\n\n' % get_config('MCU', 1))
f.write('// crystal frequency\n')
f.write('#define STM32_HSECLK %sU\n\n' % get_config('OSCILLATOR_HZ'))
f.write('// UART used for stdout (printf)\n')
if get_config('STDOUT_SERIAL', required=False):
f.write('#define HAL_STDOUT_SERIAL %s\n\n' % get_config('STDOUT_SERIAL'))
f.write('// baudrate used for stdout (printf)\n')
f.write('#define HAL_STDOUT_BAUDRATE %u\n\n' % get_config('STDOUT_BAUDRATE', type=int))
if have_type_prefix('SDIO'):
f.write('// SDIO available, enable POSIX filesystem support\n')
f.write('#define USE_POSIX\n\n')
f.write('#define HAL_USE_SDC TRUE\n')
build_flags.append('USE_FATFS=yes')
elif have_type_prefix('SDMMC'):
f.write('// SDMMC available, enable POSIX filesystem support\n')
f.write('#define USE_POSIX\n\n')
f.write('#define HAL_USE_SDC TRUE\n')
f.write('#define STM32_SDC_USE_SDMMC1 TRUE\n')
build_flags.append('USE_FATFS=yes')
elif has_sdcard_spi():
f.write('// MMC via SPI available, enable POSIX filesystem support\n')
f.write('#define USE_POSIX\n\n')
f.write('#define HAL_USE_MMC_SPI TRUE\n')
f.write('#define HAL_USE_SDC FALSE\n')
f.write('#define HAL_SDCARD_SPI_HOOK TRUE\n')
build_flags.append('USE_FATFS=yes')
else:
f.write('#define HAL_USE_SDC FALSE\n')
build_flags.append('USE_FATFS=no')
if 'OTG1' in bytype:
f.write('#define STM32_USB_USE_OTG1 TRUE\n')
f.write('#define HAL_USE_USB TRUE\n')
f.write('#define HAL_USE_SERIAL_USB TRUE\n')
if 'OTG2' in bytype:
f.write('#define STM32_USB_USE_OTG2 TRUE\n')
if have_type_prefix('CAN'):
enable_can(f)
if get_config('PROCESS_STACK', required=False):
env_vars['PROCESS_STACK'] = get_config('PROCESS_STACK')
else:
env_vars['PROCESS_STACK'] = "0x2000"
if get_config('MAIN_STACK', required=False):
env_vars['MAIN_STACK'] = get_config('MAIN_STACK')
else:
env_vars['MAIN_STACK'] = "0x400"
if get_config('IOMCU_FW', required=False):
env_vars['IOMCU_FW'] = get_config('IOMCU_FW')
else:
env_vars['IOMCU_FW'] = 0
# write any custom STM32 defines
for d in alllines:
if d.startswith('STM32_'):
f.write('#define %s\n' % d)
if d.startswith('define '):
f.write('#define %s\n' % d[7:])
flash_size = get_config('FLASH_SIZE_KB', type=int)
f.write('#define BOARD_FLASH_SIZE %u\n' % flash_size)
f.write('#define CRT1_AREAS_NUMBER 1\n')
    # get core-coupled memory if available (this region is not DMA capable)
ccm_size = get_mcu_config('CCM_RAM_SIZE_KB')
if ccm_size is not None:
f.write('\n// core-coupled memory\n')
f.write('#define CCM_RAM_SIZE_KB %u\n' % ccm_size)
f.write('#define CCM_BASE_ADDRESS 0x%08x\n' % get_mcu_config('CCM_BASE_ADDRESS', True))
# get DTCM memory if available (DMA-capable with no cache flush/invalidate)
dtcm_size = get_mcu_config('DTCM_RAM_SIZE_KB')
if dtcm_size is not None:
f.write('\n// DTCM memory\n')
f.write('#define DTCM_RAM_SIZE_KB %u\n' % dtcm_size)
f.write('#define DTCM_BASE_ADDRESS 0x%08x\n' % get_mcu_config('DTCM_BASE_ADDRESS', True))
flash_reserve_start = get_config(
'FLASH_RESERVE_START_KB', default=16, type=int)
f.write('\n// location of loaded firmware\n')
f.write('#define FLASH_LOAD_ADDRESS 0x%08x\n' % (0x08000000 + flash_reserve_start*1024))
f.write('\n')
ram_size_kb = get_mcu_config('RAM_SIZE_KB', True)
ram_base_address = get_mcu_config('RAM_BASE_ADDRESS', True)
f.write('// main memory size and address\n')
f.write('#define HAL_RAM_SIZE_KB %uU\n' % ram_size_kb)
f.write('#define HAL_RAM_BASE_ADDRESS 0x%08x\n' % ram_base_address)
f.write('\n// CPU serial number (12 bytes)\n')
f.write('#define UDID_START 0x%08x\n\n' % get_mcu_config('UDID_START', True))
f.write('\n// APJ board ID (for bootloaders)\n')
f.write('#define APJ_BOARD_ID %s\n' % get_config('APJ_BOARD_ID'))
lib = get_mcu_lib(mcu_type)
build_info = lib.build
if mcu_series == "STM32F100":
cortex = "cortex-m3"
env_vars['CPU_FLAGS'] = ["-mcpu=%s" % cortex]
build_info['MCU'] = cortex
else:
cortex = "cortex-m4"
env_vars['CPU_FLAGS'] = [ "-mcpu=%s" % cortex, "-mfpu=fpv4-sp-d16", "-mfloat-abi=hard"]
build_info['MCU'] = cortex
if not args.bootloader:
env_vars['CPU_FLAGS'].append('-u_printf_float')
build_info['ENV_UDEFS'] = "-DCHPRINTF_USE_FLOAT=1"
# setup build variables
for v in build_info.keys():
build_flags.append('%s=%s' % (v, build_info[v]))
# setup for bootloader build
if args.bootloader:
f.write('''
#define HAL_BOOTLOADER_BUILD TRUE
#define HAL_USE_ADC FALSE
#define HAL_USE_EXT FALSE
#define HAL_NO_UARTDRIVER
#define HAL_NO_PRINTF
#define HAL_NO_CCM
#define CH_DBG_STATISTICS FALSE
#define CH_CFG_USE_TM FALSE
#define CH_CFG_USE_REGISTRY FALSE
#define CH_CFG_USE_WAITEXIT FALSE
#define CH_CFG_USE_DYNAMIC FALSE
#define CH_CFG_USE_MEMPOOLS FALSE
#define CH_CFG_USE_OBJ_FIFOS FALSE
#define CH_DBG_FILL_THREADS FALSE
#define CH_CFG_USE_SEMAPHORES FALSE
#define CH_CFG_USE_HEAP FALSE
#define CH_CFG_USE_MUTEXES FALSE
#define CH_CFG_USE_CONDVARS FALSE
#define CH_CFG_USE_CONDVARS_TIMEOUT FALSE
#define CH_CFG_USE_EVENTS FALSE
#define CH_CFG_USE_EVENTS_TIMEOUT FALSE
#define CH_CFG_USE_MESSAGES FALSE
#define CH_CFG_USE_MAILBOXES FALSE
#define CH_CFG_USE_FACTORY FALSE
#define CH_CFG_USE_MEMCORE FALSE
#define HAL_USE_I2C FALSE
#define HAL_USE_PWM FALSE
''')
def write_ldscript(fname):
'''write ldscript.ld for this board'''
flash_size = get_config('FLASH_USE_MAX_KB', type=int, default=0)
if flash_size == 0:
flash_size = get_config('FLASH_SIZE_KB', type=int)
# space to reserve for bootloader and storage at start of flash
flash_reserve_start = get_config(
'FLASH_RESERVE_START_KB', default=16, type=int)
env_vars['FLASH_RESERVE_START_KB'] = str(flash_reserve_start)
# space to reserve for storage at end of flash
flash_reserve_end = get_config('FLASH_RESERVE_END_KB', default=0, type=int)
# ram size
ram_size = get_mcu_config('RAM_SIZE_KB', True)
ram_base = get_mcu_config('RAM_BASE_ADDRESS', True)
flash_base = 0x08000000 + flash_reserve_start * 1024
flash_length = flash_size - (flash_reserve_start + flash_reserve_end)
print("Generating ldscript.ld")
f = open(fname, 'w')
f.write('''/* generated ldscript.ld */
MEMORY
{
flash : org = 0x%08x, len = %uK
ram0 : org = 0x%08x, len = %uk
}
INCLUDE common.ld
''' % (flash_base, flash_length, ram_base, ram_size))
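# Illustrative output (hypothetical board values): with FLASH_SIZE_KB 1024,
# FLASH_RESERVE_START_KB 16 and FLASH_RESERVE_END_KB 0 the generated MEMORY
# region becomes
#   flash : org = 0x08004000, len = 1008K
# while ram0 takes its origin and length from the MCU's RAM_BASE_ADDRESS and
# RAM_SIZE_KB entries.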
def copy_common_linkerscript(outdir, hwdef):
dirpath = os.path.dirname(hwdef)
shutil.copy(os.path.join(dirpath, "../common/common.ld"),
os.path.join(outdir, "common.ld"))
def write_USB_config(f):
'''write USB config defines'''
if not have_type_prefix('OTG'):
return
f.write('// USB configuration\n')
f.write('#define HAL_USB_VENDOR_ID %s\n' % get_config('USB_VENDOR', default=0x0483)) # default to ST
f.write('#define HAL_USB_PRODUCT_ID %s\n' % get_config('USB_PRODUCT', default=0x5740))
f.write('#define HAL_USB_STRING_MANUFACTURER "%s"\n' % get_config("USB_STRING_MANUFACTURER", default="ArduPilot"))
default_product = "%BOARD%"
if args.bootloader:
default_product += "-BL"
f.write('#define HAL_USB_STRING_PRODUCT "%s"\n' % get_config("USB_STRING_PRODUCT", default=default_product))
f.write('#define HAL_USB_STRING_SERIAL "%s"\n' % get_config("USB_STRING_SERIAL", default="%SERIAL%"))
f.write('\n\n')
def write_SPI_table(f):
'''write SPI device table'''
f.write('\n// SPI device table\n')
devlist = []
for dev in spidev:
if len(dev) != 7:
print("Badly formed SPIDEV line %s" % dev)
name = '"' + dev[0] + '"'
bus = dev[1]
devid = dev[2]
cs = dev[3]
mode = dev[4]
lowspeed = dev[5]
highspeed = dev[6]
if not bus.startswith('SPI') or not bus in spi_list:
error("Bad SPI bus in SPIDEV line %s" % dev)
if not devid.startswith('DEVID') or not is_int(devid[5:]):
error("Bad DEVID in SPIDEV line %s" % dev)
if not cs in bylabel or not bylabel[cs].is_CS():
error("Bad CS pin in SPIDEV line %s" % dev)
if not mode in ['MODE0', 'MODE1', 'MODE2', 'MODE3']:
error("Bad MODE in SPIDEV line %s" % dev)
if not lowspeed.endswith('*MHZ') and not lowspeed.endswith('*KHZ'):
error("Bad lowspeed value %s in SPIDEV line %s" % (lowspeed, dev))
if not highspeed.endswith('*MHZ') and not highspeed.endswith('*KHZ'):
error("Bad highspeed value %s in SPIDEV line %s" % (highspeed,
dev))
cs_pin = bylabel[cs]
pal_line = 'PAL_LINE(GPIO%s,%uU)' % (cs_pin.port, cs_pin.pin)
devidx = len(devlist)
f.write(
'#define HAL_SPI_DEVICE%-2u SPIDesc(%-17s, %2u, %2u, %-19s, SPIDEV_%s, %7s, %7s)\n'
% (devidx, name, spi_list.index(bus), int(devid[5:]), pal_line,
mode, lowspeed, highspeed))
devlist.append('HAL_SPI_DEVICE%u' % devidx)
f.write('#define HAL_SPI_DEVICE_LIST %s\n\n' % ','.join(devlist))
def write_SPI_config(f):
'''write SPI config defines'''
global spi_list
for t in bytype.keys():
if t.startswith('SPI'):
spi_list.append(t)
spi_list = sorted(spi_list)
if len(spi_list) == 0:
f.write('#define HAL_USE_SPI FALSE\n')
return
devlist = []
for dev in spi_list:
n = int(dev[3:])
devlist.append('HAL_SPI%u_CONFIG' % n)
f.write(
'#define HAL_SPI%u_CONFIG { &SPID%u, %u, STM32_SPI_SPI%u_TX_DMA_STREAM, STM32_SPI_SPI%u_RX_DMA_STREAM }\n'
% (n, n, n, n, n))
f.write('#define HAL_SPI_BUS_LIST %s\n\n' % ','.join(devlist))
write_SPI_table(f)
def write_UART_config(f):
'''write UART config defines'''
get_config('UART_ORDER')
uart_list = config['UART_ORDER']
f.write('\n// UART configuration\n')
# write out driver declarations for HAL_ChibOS_Class.cpp
devnames = "ABCDEFGH"
sdev = 0
idx = 0
for dev in uart_list:
if dev == 'EMPTY':
f.write('#define HAL_UART%s_DRIVER Empty::UARTDriver uart%sDriver\n' %
(devnames[idx], devnames[idx]))
else:
f.write(
'#define HAL_UART%s_DRIVER ChibiOS::UARTDriver uart%sDriver(%u)\n'
% (devnames[idx], devnames[idx], sdev))
sdev += 1
idx += 1
for idx in range(len(uart_list), 7):
f.write('#define HAL_UART%s_DRIVER Empty::UARTDriver uart%sDriver\n' %
(devnames[idx], devnames[idx]))
if 'IOMCU_UART' in config:
f.write('#define HAL_WITH_IO_MCU 1\n')
idx = len(uart_list)
f.write('#define HAL_UART_IOMCU_IDX %u\n' % idx)
f.write(
'#define HAL_UART_IO_DRIVER ChibiOS::UARTDriver uart_io(HAL_UART_IOMCU_IDX)\n'
)
uart_list.append(config['IOMCU_UART'][0])
else:
f.write('#define HAL_WITH_IO_MCU 0\n')
f.write('\n')
need_uart_driver = False
devlist = []
for dev in uart_list:
if dev.startswith('UART'):
n = int(dev[4:])
elif dev.startswith('USART'):
n = int(dev[5:])
elif dev.startswith('OTG'):
n = int(dev[3:])
elif dev.startswith('EMPTY'):
continue
else:
error("Invalid element %s in UART_ORDER" % dev)
devlist.append('HAL_%s_CONFIG' % dev)
if dev + "_RTS" in bylabel:
p = bylabel[dev + '_RTS']
rts_line = 'PAL_LINE(GPIO%s,%uU)' % (p.port, p.pin)
else:
rts_line = "0"
if dev.startswith('OTG'):
f.write(
'#define HAL_%s_CONFIG {(BaseSequentialStream*) &SDU1, true, false, 0, 0, false, 0, 0}\n'
% dev)
else:
need_uart_driver = True
f.write(
"#define HAL_%s_CONFIG { (BaseSequentialStream*) &SD%u, false, "
% (dev, n))
f.write("STM32_%s_RX_DMA_CONFIG, STM32_%s_TX_DMA_CONFIG, %s}\n" %
(dev, dev, rts_line))
f.write('#define HAL_UART_DEVICE_LIST %s\n\n' % ','.join(devlist))
if not need_uart_driver and not args.bootloader:
f.write('''
#ifndef HAL_USE_SERIAL
#define HAL_USE_SERIAL FALSE
#endif
''')
def write_UART_config_bootloader(f):
'''write UART config defines'''
get_config('UART_ORDER')
uart_list = config['UART_ORDER']
f.write('\n// UART configuration\n')
devlist = []
have_uart = False
for u in uart_list:
if u.startswith('OTG'):
devlist.append('(BaseChannel *)&SDU1')
else:
unum = int(u[-1])
devlist.append('(BaseChannel *)&SD%u' % unum)
have_uart = True
f.write('#define BOOTLOADER_DEV_LIST %s\n' % ','.join(devlist))
if not have_uart:
f.write('#define HAL_USE_SERIAL FALSE\n')
def write_I2C_config(f):
'''write I2C config defines'''
if not have_type_prefix('I2C'):
print("No I2C peripherals")
f.write('#define HAL_USE_I2C FALSE\n')
return
if not 'I2C_ORDER' in config:
error("Missing I2C_ORDER config")
i2c_list = config['I2C_ORDER']
f.write('// I2C configuration\n')
if len(i2c_list) == 0:
error("I2C_ORDER invalid")
devlist = []
for dev in i2c_list:
if not dev.startswith('I2C') or dev[3] not in "1234":
error("Bad I2C_ORDER element %s" % dev)
n = int(dev[3:])
devlist.append('HAL_I2C%u_CONFIG' % n)
f.write('''
#if defined(STM32_I2C_I2C%u_RX_DMA_STREAM) && defined(STM32_I2C_I2C%u_TX_DMA_STREAM)
#define HAL_I2C%u_CONFIG { &I2CD%u, STM32_I2C_I2C%u_RX_DMA_STREAM, STM32_I2C_I2C%u_TX_DMA_STREAM }
#else
#define HAL_I2C%u_CONFIG { &I2CD%u, SHARED_DMA_NONE, SHARED_DMA_NONE }
#endif
'''
% (n, n, n, n, n, n, n, n))
if dev + "_SCL" in bylabel:
p = bylabel[dev + "_SCL"]
f.write(
'#define HAL_%s_SCL_AF %d\n' % (dev, p.af)
)
f.write('\n#define HAL_I2C_DEVICE_LIST %s\n\n' % ','.join(devlist))
def parse_timer(str):
    '''parse a timer channel string, e.g. TIM8_CH2N'''
result = re.match(r'TIM([0-9]*)_CH([1234])(N?)', str)
if result:
tim = int(result.group(1))
chan = int(result.group(2))
compl = result.group(3) == 'N'
if tim < 1 or tim > 17:
error("Bad timer number %s in %s" % (tim, str))
return (tim, chan, compl)
else:
error("Bad timer definition %s" % str)
def write_PWM_config(f):
'''write PWM config defines'''
rc_in = None
rc_in_int = None
alarm = None
pwm_out = []
pwm_timers = []
for l in bylabel.keys():
p = bylabel[l]
if p.type.startswith('TIM'):
if p.has_extra('RCIN'):
rc_in = p
elif p.has_extra('RCININT'):
rc_in_int = p
elif p.has_extra('ALARM'):
alarm = p
else:
if p.extra_value('PWM', type=int) is not None:
pwm_out.append(p)
if p.type not in pwm_timers:
pwm_timers.append(p.type)
if not pwm_out:
print("No PWM output defined")
f.write('#define HAL_USE_PWM FALSE\n')
if rc_in is not None:
(n, chan, compl) = parse_timer(rc_in.label)
if compl:
# it is an inverted channel
f.write('#define HAL_RCIN_IS_INVERTED\n')
if chan not in [1, 2]:
error(
"Bad channel number, only channel 1 and 2 supported for RCIN")
f.write('// RC input config\n')
f.write('#define HAL_USE_ICU TRUE\n')
f.write('#define STM32_ICU_USE_TIM%u TRUE\n' % n)
f.write('#define RCIN_ICU_TIMER ICUD%u\n' % n)
f.write('#define RCIN_ICU_CHANNEL ICU_CHANNEL_%u\n' % chan)
f.write('#define STM32_RCIN_DMA_STREAM STM32_TIM_TIM%u_CH%u_DMA_STREAM\n' % (n, chan))
f.write('#define STM32_RCIN_DMA_CHANNEL STM32_TIM_TIM%u_CH%u_DMA_CHAN\n' % (n, chan))
f.write('\n')
if rc_in_int is not None:
(n, chan, compl) = parse_timer(rc_in_int.label)
if compl:
error('Complementary channel is not supported for RCININT %s' % rc_in_int.label)
f.write('// RC input config\n')
f.write('#define HAL_USE_EICU TRUE\n')
f.write('#define STM32_EICU_USE_TIM%u TRUE\n' % n)
f.write('#define RCININT_EICU_TIMER EICUD%u\n' % n)
f.write('#define RCININT_EICU_CHANNEL EICU_CHANNEL_%u\n' % chan)
f.write('\n')
if alarm is not None:
(n, chan, compl) = parse_timer(alarm.label)
if compl:
error("Complementary channel is not supported for ALARM %s" % alarm.label)
f.write('\n')
f.write('// Alarm PWM output config\n')
f.write('#define STM32_PWM_USE_TIM%u TRUE\n' % n)
f.write('#define STM32_TIM%u_SUPPRESS_ISR\n' % n)
chan_mode = [
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED',
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED'
]
chan_mode[chan - 1] = 'PWM_OUTPUT_ACTIVE_HIGH'
pwm_clock = 1000000
period = 1000
f.write('''#define HAL_PWM_ALARM \\
{ /* pwmGroup */ \\
%u, /* Timer channel */ \\
{ /* PWMConfig */ \\
%u, /* PWM clock frequency. */ \\
%u, /* Initial PWM period 20ms. */ \\
NULL, /* no callback */ \\
{ /* Channel Config */ \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL} \\
}, \\
0, 0 \\
}, \\
&PWMD%u /* PWMDriver* */ \\
}\n''' %
(chan-1, pwm_clock, period, chan_mode[0],
chan_mode[1], chan_mode[2], chan_mode[3], n))
else:
f.write('\n')
f.write('// No Alarm output pin defined\n')
f.write('#undef HAL_PWM_ALARM\n')
f.write('\n')
f.write('// PWM timer config\n')
for t in sorted(pwm_timers):
n = int(t[3:])
f.write('#define STM32_PWM_USE_TIM%u TRUE\n' % n)
f.write('#define STM32_TIM%u_SUPPRESS_ISR\n' % n)
f.write('\n')
f.write('// PWM output config\n')
groups = []
for t in sorted(pwm_timers):
group = len(groups) + 1
n = int(t[3:])
chan_list = [255, 255, 255, 255]
chan_mode = [
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED',
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED'
]
alt_functions = [ 0, 0, 0, 0 ]
pal_lines = [ '0', '0', '0', '0' ]
for p in pwm_out:
if p.type != t:
continue
(n, chan, compl) = parse_timer(p.label)
pwm = p.extra_value('PWM', type=int)
chan_list[chan - 1] = pwm - 1
if compl:
chan_mode[chan - 1] = 'PWM_COMPLEMENTARY_OUTPUT_ACTIVE_HIGH'
else:
chan_mode[chan - 1] = 'PWM_OUTPUT_ACTIVE_HIGH'
alt_functions[chan - 1] = p.af
pal_lines[chan - 1] = 'PAL_LINE(GPIO%s, %uU)' % (p.port, p.pin)
groups.append('HAL_PWM_GROUP%u' % group)
if n in [1, 8]:
# only the advanced timers do 8MHz clocks
advanced_timer = 'true'
else:
advanced_timer = 'false'
pwm_clock = 1000000
period = 20000 * pwm_clock / 1000000
f.write('''#ifdef STM32_TIM_TIM%u_UP_DMA_STREAM
# define HAL_PWM%u_DMA_CONFIG true, STM32_TIM_TIM%u_UP_DMA_STREAM, STM32_TIM_TIM%u_UP_DMA_CHAN
#else
# define HAL_PWM%u_DMA_CONFIG false, 0, 0
#endif\n''' % (n, n, n, n, n))
f.write('''#define HAL_PWM_GROUP%u { %s, \\
{%u, %u, %u, %u}, \\
/* Group Initial Config */ \\
{ \\
%u, /* PWM clock frequency. */ \\
%u, /* Initial PWM period 20ms. */ \\
NULL, /* no callback */ \\
{ \\
/* Channel Config */ \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL} \\
}, 0, 0}, &PWMD%u, \\
HAL_PWM%u_DMA_CONFIG, \\
{ %u, %u, %u, %u }, \\
{ %s, %s, %s, %s }}\n''' %
(group, advanced_timer,
chan_list[0], chan_list[1], chan_list[2], chan_list[3],
pwm_clock, period,
chan_mode[0], chan_mode[1], chan_mode[2], chan_mode[3],
n, n,
alt_functions[0], alt_functions[1], alt_functions[2], alt_functions[3],
pal_lines[0], pal_lines[1], pal_lines[2], pal_lines[3]))
f.write('#define HAL_PWM_GROUPS %s\n\n' % ','.join(groups))
def write_ADC_config(f):
'''write ADC config defines'''
f.write('// ADC config\n')
adc_chans = []
for l in bylabel:
p = bylabel[l]
if not p.type.startswith('ADC'):
continue
chan = get_ADC1_chan(mcu_type, p.portpin)
scale = p.extra_value('SCALE', default=None)
if p.label == 'VDD_5V_SENS':
f.write('#define ANALOG_VCC_5V_PIN %u\n' % chan)
adc_chans.append((chan, scale, p.label, p.portpin))
adc_chans = sorted(adc_chans)
vdd = get_config('STM32_VDD')
if vdd[-1] == 'U':
vdd = vdd[:-1]
vdd = float(vdd) * 0.01
f.write('#define HAL_ANALOG_PINS { \\\n')
for (chan, scale, label, portpin) in adc_chans:
scale_str = '%.2f/4096' % vdd
if scale is not None and scale != '1':
scale_str = scale + '*' + scale_str
f.write('{ %2u, %12s }, /* %s %s */ \\\n' % (chan, scale_str, portpin,
label))
f.write('}\n\n')
def write_GPIO_config(f):
'''write GPIO config defines'''
f.write('// GPIO config\n')
gpios = []
gpioset = set()
for l in bylabel:
p = bylabel[l]
gpio = p.extra_value('GPIO', type=int)
if gpio is None:
continue
if gpio in gpioset:
error("Duplicate GPIO value %u" % gpio)
gpioset.add(gpio)
# see if it is also a PWM pin
pwm = p.extra_value('PWM', type=int, default=0)
port = p.port
pin = p.pin
gpios.append((gpio, pwm, port, pin, p))
gpios = sorted(gpios)
for (gpio, pwm, port, pin, p) in gpios:
f.write('#define HAL_GPIO_LINE_GPIO%u PAL_LINE(GPIO%s, %2uU)\n' % (gpio, port, pin))
f.write('#define HAL_GPIO_PINS { \\\n')
for (gpio, pwm, port, pin, p) in gpios:
f.write('{ %3u, true, %2u, PAL_LINE(GPIO%s, %2uU)}, /* %s */ \\\n' %
(gpio, pwm, port, pin, p))
# and write #defines for use by config code
f.write('}\n\n')
f.write('// full pin define list\n')
last_label = None
for l in sorted(list(set(bylabel.keys()))):
p = bylabel[l]
label = p.label
label = label.replace('-', '_')
if label == last_label:
continue
last_label = label
f.write('#define HAL_GPIO_PIN_%-20s PAL_LINE(GPIO%s,%uU)\n' %
(label, p.port, p.pin))
f.write('\n')
def bootloader_path():
# always embed a bootloader if it is available
this_dir = os.path.realpath(__file__)
rootdir = os.path.relpath(os.path.join(this_dir, "../../../../.."))
hwdef_dirname = os.path.basename(os.path.dirname(args.hwdef))
bootloader_filename = "%s_bl.bin" % (hwdef_dirname,)
bootloader_path = os.path.join(rootdir,
"Tools",
"bootloaders",
bootloader_filename)
if os.path.exists(bootloader_path):
return os.path.realpath(bootloader_path)
return None
def add_bootloader():
    '''add the bootloader to ROMFS'''
bp = bootloader_path()
if bp is not None:
romfs.append( ("bootloader.bin", bp) )
def write_ROMFS(outdir):
'''create ROMFS embedded header'''
env_vars['ROMFS_FILES'] = romfs
def write_prototype_file():
'''write the prototype file for apj generation'''
pf = open(os.path.join(outdir, "apj.prototype"), "w")
pf.write('''{
"board_id": %s,
"magic": "PX4FWv1",
"description": "Firmware for the %s board",
"image": "",
"build_time": 0,
"summary": "PX4FMUv3",
"version": "0.1",
"image_size": 0,
"git_identity": "",
"board_revision": 0
}
''' % (get_config('APJ_BOARD_ID'),
get_config('APJ_BOARD_TYPE', default=mcu_type)))
def write_peripheral_enable(f):
'''write peripheral enable lines'''
f.write('// peripherals enabled\n')
for type in sorted(bytype.keys()):
if type.startswith('USART') or type.startswith('UART'):
dstr = 'STM32_SERIAL_USE_%-6s' % type
f.write('#ifndef %s\n' % dstr)
f.write('#define %s TRUE\n' % dstr)
f.write('#endif\n')
if type.startswith('SPI'):
f.write('#define STM32_SPI_USE_%s TRUE\n' % type)
if type.startswith('OTG'):
f.write('#define STM32_USB_USE_%s TRUE\n' % type)
if type.startswith('I2C'):
f.write('#define STM32_I2C_USE_%s TRUE\n' % type)
def get_dma_exclude(periph_list):
'''return list of DMA devices to exclude from DMA'''
dma_exclude = []
for periph in periph_list:
if periph not in bylabel:
continue
p = bylabel[periph]
if p.has_extra('NODMA'):
dma_exclude.append(periph)
return dma_exclude
def write_hwdef_header(outfilename):
'''write hwdef header file'''
print("Writing hwdef setup in %s" % outfilename)
f = open(outfilename, 'w')
f.write('''/*
generated hardware definitions from hwdef.dat - DO NOT EDIT
*/
#pragma once
#ifndef TRUE
#define TRUE 1
#endif
#ifndef FALSE
#define FALSE 0
#endif
''')
write_mcu_config(f)
write_USB_config(f)
write_SPI_config(f)
write_ADC_config(f)
write_GPIO_config(f)
write_peripheral_enable(f)
write_prototype_file()
dma_resolver.write_dma_header(f, periph_list, mcu_type,
dma_exclude=get_dma_exclude(periph_list),
dma_priority=get_config('DMA_PRIORITY',default='TIM* SPI*', spaces=True),
dma_noshare=get_config('DMA_NOSHARE',default='', spaces=True))
if not args.bootloader:
write_PWM_config(f)
write_I2C_config(f)
write_UART_config(f)
else:
write_UART_config_bootloader(f)
add_bootloader()
if len(romfs) > 0:
f.write('#define HAL_HAVE_AP_ROMFS_EMBEDDED_H 1\n')
if mcu_series == 'STM32F100':
f.write('''
/*
* I/O ports initial setup, this configuration is established soon after reset
* in the initialization code.
* Please refer to the STM32 Reference Manual for details.
*/
#define PIN_MODE_OUTPUT_PP(n) (0 << (((n) & 7) * 4))
#define PIN_MODE_OUTPUT_OD(n) (4 << (((n) & 7) * 4))
#define PIN_MODE_AF_PP(n) (8 << (((n) & 7) * 4))
#define PIN_MODE_AF_OD(n) (12 << (((n) & 7) * 4))
#define PIN_MODE_ANALOG(n) (0 << (((n) & 7) * 4))
#define PIN_MODE_NOPULL(n) (4 << (((n) & 7) * 4))
#define PIN_MODE_PUD(n) (8 << (((n) & 7) * 4))
#define PIN_SPEED_MEDIUM(n) (1 << (((n) & 7) * 4))
#define PIN_SPEED_LOW(n) (2 << (((n) & 7) * 4))
#define PIN_SPEED_HIGH(n) (3 << (((n) & 7) * 4))
#define PIN_ODR_HIGH(n) (1 << (((n) & 15)))
#define PIN_ODR_LOW(n) (0 << (((n) & 15)))
#define PIN_PULLUP(n) (1 << (((n) & 15)))
#define PIN_PULLDOWN(n) (0 << (((n) & 15)))
#define PIN_UNDEFINED(n) PIN_INPUT_PUD(n)
''')
else:
f.write('''
/*
* I/O ports initial setup, this configuration is established soon after reset
* in the initialization code.
* Please refer to the STM32 Reference Manual for details.
*/
#define PIN_MODE_INPUT(n) (0U << ((n) * 2U))
#define PIN_MODE_OUTPUT(n) (1U << ((n) * 2U))
#define PIN_MODE_ALTERNATE(n) (2U << ((n) * 2U))
#define PIN_MODE_ANALOG(n) (3U << ((n) * 2U))
#define PIN_ODR_LOW(n) (0U << (n))
#define PIN_ODR_HIGH(n) (1U << (n))
#define PIN_OTYPE_PUSHPULL(n) (0U << (n))
#define PIN_OTYPE_OPENDRAIN(n) (1U << (n))
#define PIN_OSPEED_VERYLOW(n) (0U << ((n) * 2U))
#define PIN_OSPEED_LOW(n) (1U << ((n) * 2U))
#define PIN_OSPEED_MEDIUM(n) (2U << ((n) * 2U))
#define PIN_OSPEED_HIGH(n) (3U << ((n) * 2U))
#define PIN_PUPDR_FLOATING(n) (0U << ((n) * 2U))
#define PIN_PUPDR_PULLUP(n) (1U << ((n) * 2U))
#define PIN_PUPDR_PULLDOWN(n) (2U << ((n) * 2U))
#define PIN_AFIO_AF(n, v) ((v) << (((n) % 8U) * 4U))
''')
for port in sorted(ports):
f.write("/* PORT%s:\n" % port)
for pin in range(pincount[port]):
p = portmap[port][pin]
if p.label is not None:
f.write(" %s\n" % p)
f.write("*/\n\n")
if pincount[port] == 0:
# handle blank ports
for vtype in vtypes:
f.write("#define VAL_GPIO%s_%-7s 0x0\n" % (port,
vtype))
f.write("\n\n\n")
continue
for vtype in vtypes:
f.write("#define VAL_GPIO%s_%-7s (" % (p.port, vtype))
first = True
for pin in range(pincount[port]):
p = portmap[port][pin]
modefunc = getattr(p, "get_" + vtype)
v = modefunc()
if v is None:
continue
if not first:
f.write(" | \\\n ")
f.write(v)
first = False
if first:
# there were no pin definitions, use 0
f.write("0")
f.write(")\n\n")
def build_peripheral_list():
'''build a list of peripherals for DMA resolver to work on'''
peripherals = []
done = set()
prefixes = ['SPI', 'USART', 'UART', 'I2C']
for p in allpins:
type = p.type
if type in done:
continue
for prefix in prefixes:
if type.startswith(prefix):
ptx = type + "_TX"
prx = type + "_RX"
peripherals.append(ptx)
peripherals.append(prx)
if not ptx in bylabel:
bylabel[ptx] = p
if not prx in bylabel:
bylabel[prx] = p
if type.startswith('ADC'):
peripherals.append(type)
if type.startswith('SDIO') or type.startswith('SDMMC'):
peripherals.append(type)
if type.startswith('TIM'):
if p.has_extra('RCIN'):
label = p.label
if label[-1] == 'N':
label = label[:-1]
peripherals.append(label)
elif not p.has_extra('ALARM') and not p.has_extra('RCININT'):
# get the TIMn_UP DMA channels for DShot
label = type + '_UP'
if not label in peripherals and not p.has_extra('NODMA'):
peripherals.append(label)
done.add(type)
return peripherals
def write_env_py(filename):
'''write out env.py for environment variables to control the build process'''
# see if board has a defaults.parm file
defaults_filename = os.path.join(os.path.dirname(args.hwdef), 'defaults.parm')
if os.path.exists(defaults_filename) and not args.bootloader:
print("Adding defaults.parm")
env_vars['DEFAULT_PARAMETERS'] = os.path.abspath(defaults_filename)
# CHIBIOS_BUILD_FLAGS is passed to the ChibiOS makefile
env_vars['CHIBIOS_BUILD_FLAGS'] = ' '.join(build_flags)
pickle.dump(env_vars, open(filename, "wb"))
def romfs_add(romfs_filename, filename):
'''add a file to ROMFS'''
romfs.append((romfs_filename, filename))
def romfs_wildcard(pattern):
'''add a set of files to ROMFS by wildcard'''
base_path = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..')
(pattern_dir, pattern) = os.path.split(pattern)
for f in os.listdir(os.path.join(base_path, pattern_dir)):
if fnmatch.fnmatch(f, pattern):
romfs.append((f, os.path.join(pattern_dir, f)))
def process_line(line):
'''process one line of pin definition file'''
global allpins
a = shlex.split(line)
# keep all config lines for later use
alllines.append(line)
if a[0].startswith('P') and a[0][1] in ports and a[0] in config:
error("Pin %s redefined" % a[0])
config[a[0]] = a[1:]
if a[0] == 'MCU':
global mcu_type, mcu_series
mcu_type = a[2]
mcu_series = a[1]
setup_mcu_type_defaults()
if a[0].startswith('P') and a[0][1] in ports:
# it is a port/pin definition
try:
port = a[0][1]
pin = int(a[0][2:])
label = a[1]
type = a[2]
extra = a[3:]
except Exception:
error("Bad pin line: %s" % a)
return
p = generic_pin(port, pin, label, type, extra)
portmap[port][pin] = p
allpins.append(p)
if not type in bytype:
bytype[type] = []
bytype[type].append(p)
bylabel[label] = p
af = get_alt_function(mcu_type, a[0], label)
if af is not None:
p.af = af
if a[0] == 'SPIDEV':
spidev.append(a[1:])
if a[0] == 'ROMFS':
romfs_add(a[1],a[2])
if a[0] == 'ROMFS_WILDCARD':
romfs_wildcard(a[1])
if a[0] == 'undef':
print("Removing %s" % a[1])
config.pop(a[1], '')
bytype.pop(a[1],'')
bylabel.pop(a[1],'')
        # also remove all occurrences of defines in previous lines, if any
for line in alllines[:]:
if line.startswith('define') and a[1] in line:
alllines.remove(line)
newpins = []
for pin in allpins:
if pin.type == a[1]:
continue
if pin.label == a[1]:
continue
if pin.portpin == a[1]:
continue
newpins.append(pin)
allpins = newpins
if a[0] == 'env':
print("Adding environment %s" % ' '.join(a[1:]))
if len(a[1:]) < 2:
error("Bad env line for %s" % a[0])
env_vars[a[1]] = ' '.join(a[2:])
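# Illustrative pin line (hypothetical): "PA10 USART1_RX USART1" is parsed as
# port 'A', pin 10, label 'USART1_RX', type 'USART1' with no extra fields, and
# is recorded in portmap['A'][10], bytype['USART1'] and bylabel['USART1_RX'].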
def process_file(filename):
'''process a hwdef.dat file'''
try:
f = open(filename, "r")
except Exception:
error("Unable to open file %s" % filename)
for line in f.readlines():
line = line.strip()
if len(line) == 0 or line[0] == '#':
continue
a = shlex.split(line)
if a[0] == "include" and len(a) > 1:
include_file = a[1]
if include_file[0] != '/':
dir = os.path.dirname(filename)
include_file = os.path.normpath(
os.path.join(dir, include_file))
print("Including %s" % include_file)
process_file(include_file)
else:
process_line(line)
# process input file
process_file(args.hwdef)
outdir = args.outdir
if outdir is None:
outdir = '/tmp'
if not "MCU" in config:
error("Missing MCU type in config")
mcu_type = get_config('MCU', 1)
print("Setup for MCU %s" % mcu_type)
# build a list of peripherals for the DMA resolver
periph_list = build_peripheral_list()
# write out hwdef.h
write_hwdef_header(os.path.join(outdir, "hwdef.h"))
# write out ldscript.ld
write_ldscript(os.path.join(outdir, "ldscript.ld"))
write_ROMFS(outdir)
# copy the shared linker script into the build directory; it must
# exist in the same directory as the ldscript.ld file we generate.
copy_common_linkerscript(outdir, args.hwdef)
write_env_py(os.path.join(outdir, "env.py"))
|
night-ghost/ardupilot
|
libraries/AP_HAL_ChibiOS/hwdef/scripts/chibios_hwdef.py
|
Python
|
gpl-3.0
| 49,714
|
[
"CRYSTAL"
] |
3a76977591b8cafbd8e732d0fded15d62a23dae460295c39fa7fe3b767e5e2c6
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RMzid(RPackage):
"""A parser for mzIdentML files implemented using the XML package. The
parser tries to be general and able to handle all types of mzIdentML
files with the drawback of having less 'pretty' output than a vendor
specific parser. Please contact the maintainer with any problems and
supply an mzIdentML file so the problems can be fixed quickly."""
homepage = "https://www.bioconductor.org/packages/mzID/"
git = "https://git.bioconductor.org/packages/mzID.git"
version('1.14.0', commit='1c53aa6523ae61d3ebb13381381fc119d6cc6115')
depends_on('r-xml', type=('build', 'run'))
depends_on('r-plyr', type=('build', 'run'))
depends_on('r-doparallel', type=('build', 'run'))
depends_on('r-foreach', type=('build', 'run'))
depends_on('r-iterators', type=('build', 'run'))
depends_on('r-protgenerics', type=('build', 'run'))
depends_on('r@3.4.0:3.4.9', when='@1.14.0')
|
krafczyk/spack
|
var/spack/repos/builtin/packages/r-mzid/package.py
|
Python
|
lgpl-2.1
| 2,214
|
[
"Bioconductor"
] |
f8025f3c36e34a63817c8182d3ac06ed3c464a24c5087f9dad9c4638a3c032e0
|
#!/usr/bin/env python
# Family size distribution of tags which were aligned to the reference genome
#
# Author: Monika Heinzl & Gundula Povysil, Johannes-Kepler University Linz (Austria)
# Contact: monika.heinzl@edumail.at
#
# Takes at least one TABULAR file with tags before the alignment to the SSCS,
# a BAM file with tags of reads that overlap the regions of the reference genome and
# an optional BED file with chromosome, start and stop position of the regions as input.
# The program produces a plot which shows the distribution of family sizes of the tags from the input files and
# a tabular file with the data of the plot.
# USAGE: python FSD_regions.py --inputFile filenameSSCS --inputName1 filenameSSCS
# --bamFile DCSbamFile --rangesFile BEDfile --output_tabular outputfile_name_tabular
# --output_pdf outputfile_name_pdf
import argparse
import collections
import os.path
import re
import sys
import matplotlib.pyplot as plt
import numpy as np
import pysam
from matplotlib.backends.backend_pdf import PdfPages
plt.switch_backend('agg')
def readFileReferenceFree(file, delim):
with open(file, 'r') as dest_f:
data_array = np.genfromtxt(dest_f, skip_header=0, delimiter=delim, comments='#', dtype=str)
return data_array
def make_argparser():
parser = argparse.ArgumentParser(description='Family Size Distribution of tags which were aligned to regions of the reference genome')
parser.add_argument('--inputFile', help='Tabular File with three columns: ab or ba, tag and family size.')
parser.add_argument('--inputName1')
parser.add_argument('--bamFile', help='BAM file with aligned reads.')
parser.add_argument('--rangesFile', default=None, help='BED file with chromosome, start and stop positions.')
    parser.add_argument('--output_pdf', default="data.pdf", type=str, help='Name of the output pdf file.')
    parser.add_argument('--output_tabular', default="data.tabular", type=str, help='Name of the output tabular file.')
return parser
def compare_read_families_refGenome(argv):
parser = make_argparser()
args = parser.parse_args(argv[1:])
firstFile = args.inputFile
name1 = args.inputName1
name1 = name1.split(".tabular")[0]
bamFile = args.bamFile
rangesFile = args.rangesFile
title_file = args.output_pdf
title_file2 = args.output_tabular
sep = "\t"
with open(title_file2, "w") as output_file, PdfPages(title_file) as pdf:
data_array = readFileReferenceFree(firstFile, "\t")
bamIndex = f"{bamFile}.bai"
if not os.path.exists(bamIndex):
print(f"Info: Generating BAM index in {bamIndex}")
pysam.index(bamFile)
bam = pysam.AlignmentFile(bamFile, "rb")
qname_dict = collections.OrderedDict()
if rangesFile is not None:
with open(rangesFile, 'r') as regs:
range_array = np.genfromtxt(regs, skip_header=0, delimiter='\t', comments='#', dtype=str)
if range_array.ndim == 0:
print("Error: file has 0 lines")
exit(2)
if range_array.ndim == 1:
chrList = range_array[0]
start_posList = range_array[1].astype(int)
stop_posList = range_array[2].astype(int)
chrList = [chrList.tolist()]
start_posList = [start_posList.tolist()]
stop_posList = [stop_posList.tolist()]
else:
chrList = range_array[:, 0]
start_posList = range_array[:, 1].astype(int)
stop_posList = range_array[:, 2].astype(int)
if len(start_posList) != len(stop_posList):
print("start_positions and end_positions do not have the same length")
exit(3)
chrList = np.array(chrList)
start_posList = np.array(start_posList).astype(int)
stop_posList = np.array(stop_posList).astype(int)
for chr, start_pos, stop_pos in zip(chrList, start_posList, stop_posList):
chr_start_stop = "{}_{}_{}".format(chr, start_pos, stop_pos)
qname_dict[chr_start_stop] = []
for read in bam.fetch(chr, start_pos, stop_pos):
if not read.is_unmapped:
if re.search('_', read.query_name):
tags = re.split('_', read.query_name)[0]
else:
tags = read.query_name
qname_dict[chr_start_stop].append(tags)
else:
for read in bam.fetch():
if not read.is_unmapped:
if re.search(r'_', read.query_name):
tags = re.split('_', read.query_name)[0]
else:
tags = read.query_name
if read.reference_name not in qname_dict:
qname_dict[read.reference_name] = [tags]
else:
qname_dict[read.reference_name].append(tags)
seq = np.array(data_array[:, 1])
tags = np.array(data_array[:, 2])
quant = np.array(data_array[:, 0]).astype(int)
group = np.array(list(qname_dict.keys()))
all_ab = seq[np.where(tags == "ab")[0]]
all_ba = seq[np.where(tags == "ba")[0]]
quant_ab = quant[np.where(tags == "ab")[0]]
quant_ba = quant[np.where(tags == "ba")[0]]
seqDic_ab = dict(zip(all_ab, quant_ab))
seqDic_ba = dict(zip(all_ba, quant_ba))
lst_ab = []
lst_ba = []
quantAfterRegion = []
length_regions = 0
for i in group:
lst_ab_r = []
lst_ba_r = []
seq_mut = qname_dict[i]
if rangesFile is None:
seq_mut, seqMut_index = np.unique(np.array(seq_mut), return_index=True)
length_regions = length_regions + len(seq_mut) * 2
for r in seq_mut:
count_ab = seqDic_ab.get(r)
count_ba = seqDic_ba.get(r)
lst_ab_r.append(count_ab)
lst_ab.append(count_ab)
lst_ba_r.append(count_ba)
lst_ba.append(count_ba)
dataAB = np.array(lst_ab_r)
dataBA = np.array(lst_ba_r)
bigFamilies = np.where(dataAB > 20)[0]
dataAB[bigFamilies] = 22
bigFamilies = np.where(dataBA > 20)[0]
dataBA[bigFamilies] = 22
quantAll = np.concatenate((dataAB, dataBA))
quantAfterRegion.append(quantAll)
quant_ab = np.array(lst_ab)
quant_ba = np.array(lst_ba)
maximumX = np.amax(np.concatenate(quantAfterRegion))
minimumX = np.amin(np.concatenate(quantAfterRegion))
# PLOT
plt.rc('figure', figsize=(11.69, 8.27)) # A4 format
plt.rcParams['axes.facecolor'] = "E0E0E0" # grey background color
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.labelsize'] = 14
plt.rcParams['patch.edgecolor'] = "black"
fig = plt.figure()
plt.subplots_adjust(bottom=0.3)
colors = ["#6E6E6E", "#0431B4", "#5FB404", "#B40431", "#F4FA58", "#DF7401", "#81DAF5"]
col = []
for i in range(0, len(group)):
col.append(colors[i])
counts = plt.hist(quantAfterRegion, bins=range(minimumX, maximumX + 1), stacked=False, label=group,
align="left", alpha=1, color=col, edgecolor="black", linewidth=1)
ticks = np.arange(minimumX - 1, maximumX, 1)
ticks1 = [str(_) for _ in ticks]
ticks1[len(ticks1) - 1] = ">20"
plt.xticks(np.array(ticks), ticks1)
count = np.bincount([int(_) for _ in quant_ab]) # original counts
legend = "max. family size:\nabsolute frequency:\nrelative frequency:\n\ntotal nr. of reads:\n(before SSCS building)"
plt.text(0.15, 0.085, legend, size=11, transform=plt.gcf().transFigure)
legend = "AB\n{}\n{}\n{:.5f}\n\n{:,}".format(max(map(int, quant_ab)), count[len(count) - 1], float(count[len(count) - 1]) / sum(count), sum(np.array(data_array[:, 0]).astype(int)))
plt.text(0.35, 0.105, legend, size=11, transform=plt.gcf().transFigure)
count2 = np.bincount([int(_) for _ in quant_ba]) # original counts
legend = "BA\n{}\n{}\n{:.5f}" \
.format(max(map(int, quant_ba)), count2[len(count2) - 1], float(count2[len(count2) - 1]) / sum(count2))
plt.text(0.45, 0.1475, legend, size=11, transform=plt.gcf().transFigure)
plt.text(0.55, 0.2125, "total nr. of tags:", size=11, transform=plt.gcf().transFigure)
plt.text(0.8, 0.2125, "{:,} ({:,})".format(length_regions, length_regions / 2), size=11,
transform=plt.gcf().transFigure)
legend4 = "* In the plot, both family sizes of the ab and ba strands were used.\nWhereas the total numbers indicate only the single count of the tags per region.\n"
plt.text(0.1, 0.01, legend4, size=11, transform=plt.gcf().transFigure)
space = 0
for i, count in zip(group, quantAfterRegion):
plt.text(0.55, 0.15 - space, "{}:\n".format(i), size=11, transform=plt.gcf().transFigure)
plt.text(0.8, 0.15 - space, "{:,}\n".format(len(count) / 2), size=11, transform=plt.gcf().transFigure)
space = space + 0.02
plt.legend(loc='upper right', fontsize=14, bbox_to_anchor=(0.9, 1), frameon=True)
plt.xlabel("Family size", fontsize=14)
plt.ylabel("Absolute Frequency", fontsize=14)
plt.grid(b=True, which="major", color="#424242", linestyle=":")
plt.margins(0.01, None)
pdf.savefig(fig, bbox_inch="tight")
plt.close()
output_file.write("Dataset:{}{}\n".format(sep, name1))
output_file.write("{}AB{}BA\n".format(sep, sep))
output_file.write("max. family size:{}{}{}{}\n".format(sep, max(map(int, quant_ab)), sep, max(map(int, quant_ba))))
output_file.write("absolute frequency:{}{}{}{}\n".format(sep, count[len(count) - 1], sep, count2[len(count2) - 1]))
output_file.write("relative frequency:{}{:.3f}{}{:.3f}\n\n".format(sep, float(count[len(count) - 1]) / sum(count), sep, float(count2[len(count2) - 1]) / sum(count2)))
output_file.write("total nr. of reads{}{}\n".format(sep, sum(np.array(data_array[:, 0]).astype(int))))
output_file.write("total nr. of tags{}{} ({})\n".format(sep, length_regions, length_regions / 2))
output_file.write("\n\nValues from family size distribution\n")
output_file.write("{}".format(sep))
for i in group:
output_file.write("{}{}".format(i, sep))
output_file.write("\n")
j = 0
for fs in counts[1][0:len(counts[1]) - 1]:
if fs == 21:
fs = ">20"
else:
fs = "={}".format(fs)
output_file.write("FS{}{}".format(fs, sep))
if len(group) == 1:
output_file.write("{}{}".format(int(counts[0][j]), sep))
else:
for n in range(len(group)):
output_file.write("{}{}".format(int(counts[0][n][j]), sep))
output_file.write("\n")
j += 1
output_file.write("sum{}".format(sep))
if len(group) == 1:
output_file.write("{}{}".format(int(sum(counts[0])), sep))
else:
for i in counts[0]:
output_file.write("{}{}".format(int(sum(i)), sep))
output_file.write("\n")
output_file.write("\n\nIn the plot, both family sizes of the ab and ba strands were used.\nWhereas the total numbers indicate only the single count of the tags per region.\n")
output_file.write("Region{}total nr. of tags per region\n".format(sep))
for i, count in zip(group, quantAfterRegion):
output_file.write("{}{}{}\n".format(i, sep, len(count) / 2))
print("Files successfully created!")
if __name__ == '__main__':
sys.exit(compare_read_families_refGenome(sys.argv))
|
natefoo/tools-iuc
|
tools/fsd/fsd_regions.py
|
Python
|
mit
| 12,083
|
[
"pysam"
] |
2e8677f8adbc78dc9e33e82cb911926c30f0b74fd2b429110abde99938ce5a9d
|
# Authors : Denis A. Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License : BSD-3-Clause
from copy import deepcopy
import numpy as np
from ..io.pick import _pick_data_channels, pick_info
from ..utils import verbose, warn, fill_doc, _validate_type
from ..parallel import parallel_func, check_n_jobs
from .tfr import AverageTFR, _get_data
def _check_input_st(x_in, n_fft):
"""Aux function."""
# flatten to 2 D and memorize original shape
n_times = x_in.shape[-1]
def _is_power_of_two(n):
return not (n > 0 and ((n & (n - 1))))
if n_fft is None or (not _is_power_of_two(n_fft) and n_times > n_fft):
# Compute next power of 2
n_fft = 2 ** int(np.ceil(np.log2(n_times)))
elif n_fft < n_times:
raise ValueError("n_fft cannot be smaller than signal size. "
"Got %s < %s." % (n_fft, n_times))
if n_times < n_fft:
warn('The input signal is shorter ({}) than "n_fft" ({}). '
'Applying zero padding.'.format(x_in.shape[-1], n_fft))
zero_pad = n_fft - n_times
pad_array = np.zeros(x_in.shape[:-1] + (zero_pad,), x_in.dtype)
x_in = np.concatenate((x_in, pad_array), axis=-1)
else:
zero_pad = 0
return x_in, n_fft, zero_pad
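# Illustrative behaviour (hypothetical sizes): for a 1000-sample signal and
# n_fft=None the next power of two is used and the data are zero padded, e.g.
#   x, n_fft, zero_pad = _check_input_st(np.random.randn(2, 3, 1000), None)
#   # n_fft == 1024, zero_pad == 24, x.shape[-1] == 1024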
def _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width):
"""Precompute stockwell Gaussian windows (in the freq domain)."""
from scipy.fft import fft, fftfreq
tw = fftfreq(n_samp, 1. / sfreq) / n_samp
tw = np.r_[tw[:1], tw[1:][::-1]]
    k = width  # k = 1 gives the classical Stockwell transform
f_range = np.arange(start_f, stop_f, 1)
windows = np.empty((len(f_range), len(tw)), dtype=np.complex128)
for i_f, f in enumerate(f_range):
if f == 0.:
window = np.ones(len(tw))
else:
window = ((f / (np.sqrt(2. * np.pi) * k)) *
np.exp(-0.5 * (1. / k ** 2.) * (f ** 2.) * tw ** 2.))
window /= window.sum() # normalisation
windows[i_f] = fft(window)
return windows
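# Each row of `windows` is the FFT of the frequency-dependent Gaussian
#   w_f(t) = f / (sqrt(2*pi) * k) * exp(-f**2 * t**2 / (2 * k**2)),
# normalised to unit sum, with k the `width` parameter; the f == 0 row is all
# ones so the DC component is passed through unchanged.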
def _st(x, start_f, windows):
"""Compute ST based on Ali Moukadem MATLAB code (used in tests)."""
from scipy.fft import fft, ifft
n_samp = x.shape[-1]
ST = np.empty(x.shape[:-1] + (len(windows), n_samp), dtype=np.complex128)
# do the work
Fx = fft(x)
XF = np.concatenate([Fx, Fx], axis=-1)
for i_f, window in enumerate(windows):
f = start_f + i_f
ST[..., i_f, :] = ifft(XF[..., f:f + n_samp] * window)
return ST
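# Equivalently, each output row is the S-transform at one frequency f:
#   ST[..., i_f, :] = ifft(X(nu + f) * W_f(nu)),
# where the doubled spectrum XF provides the circular shift X(nu + f) and
# W_f is the precomputed Gaussian window for that frequency.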
def _st_power_itc(x, start_f, compute_itc, zero_pad, decim, W):
"""Aux function."""
from scipy.fft import fft, ifft
n_samp = x.shape[-1]
n_out = (n_samp - zero_pad)
n_out = n_out // decim + bool(n_out % decim)
psd = np.empty((len(W), n_out))
itc = np.empty_like(psd) if compute_itc else None
X = fft(x)
XX = np.concatenate([X, X], axis=-1)
for i_f, window in enumerate(W):
f = start_f + i_f
ST = ifft(XX[:, f:f + n_samp] * window)
if zero_pad > 0:
TFR = ST[:, :-zero_pad:decim]
else:
TFR = ST[:, ::decim]
TFR_abs = np.abs(TFR)
TFR_abs[TFR_abs == 0] = 1.
if compute_itc:
TFR /= TFR_abs
itc[i_f] = np.abs(np.mean(TFR, axis=0))
TFR_abs *= TFR_abs
psd[i_f] = np.mean(TFR_abs, axis=0)
return psd, itc
@fill_doc
def tfr_array_stockwell(data, sfreq, fmin=None, fmax=None, n_fft=None,
width=1.0, decim=1, return_itc=False, n_jobs=1):
"""Compute power and intertrial coherence using Stockwell (S) transform.
Same computation as `~mne.time_frequency.tfr_stockwell`, but operates on
:class:`NumPy arrays <numpy.ndarray>` instead of `~mne.Epochs` objects.
See :footcite:`Stockwell2007,MoukademEtAl2014,WheatEtAl2010,JonesEtAl2006`
for more information.
Parameters
----------
data : ndarray, shape (n_epochs, n_channels, n_times)
The signal to transform.
sfreq : float
The sampling frequency.
fmin : None, float
The minimum frequency to include. If None defaults to the minimum fft
frequency greater than zero.
fmax : None, float
The maximum frequency to include. If None defaults to the maximum fft.
n_fft : int | None
The length of the windows used for FFT. If None, it defaults to the
next power of 2 larger than the signal length.
width : float
The width of the Gaussian window. If < 1, increased temporal
resolution, if > 1, increased frequency resolution. Defaults to 1.
(classical S-Transform).
decim : int
The decimation factor on the time axis. To reduce memory usage.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
%(n_jobs)s
Returns
-------
st_power : ndarray
The multitaper power of the Stockwell transformed data.
The last two dimensions are frequency and time.
itc : ndarray
The intertrial coherence. Only returned if return_itc is True.
freqs : ndarray
The frequencies.
See Also
--------
mne.time_frequency.tfr_stockwell
mne.time_frequency.tfr_multitaper
mne.time_frequency.tfr_array_multitaper
mne.time_frequency.tfr_morlet
mne.time_frequency.tfr_array_morlet
References
----------
.. footbibliography::
"""
from scipy.fft import fftfreq
_validate_type(data, np.ndarray, 'data')
if data.ndim != 3:
raise ValueError(
'data must be 3D with shape (n_epochs, n_channels, n_times), '
f'got {data.shape}')
n_epochs, n_channels = data.shape[:2]
n_out = data.shape[2] // decim + bool(data.shape[-1] % decim)
data, n_fft_, zero_pad = _check_input_st(data, n_fft)
freqs = fftfreq(n_fft_, 1. / sfreq)
if fmin is None:
fmin = freqs[freqs > 0][0]
if fmax is None:
fmax = freqs.max()
start_f = np.abs(freqs - fmin).argmin()
stop_f = np.abs(freqs - fmax).argmin()
freqs = freqs[start_f:stop_f]
W = _precompute_st_windows(data.shape[-1], start_f, stop_f, sfreq, width)
n_freq = stop_f - start_f
psd = np.empty((n_channels, n_freq, n_out))
itc = np.empty((n_channels, n_freq, n_out)) if return_itc else None
parallel, my_st, _ = parallel_func(_st_power_itc, n_jobs)
tfrs = parallel(my_st(data[:, c, :], start_f, return_itc, zero_pad,
decim, W)
for c in range(n_channels))
for c, (this_psd, this_itc) in enumerate(iter(tfrs)):
psd[c] = this_psd
if this_itc is not None:
itc[c] = this_itc
return psd, itc, freqs
@verbose
def tfr_stockwell(inst, fmin=None, fmax=None, n_fft=None,
width=1.0, decim=1, return_itc=False, n_jobs=1,
verbose=None):
"""Compute Time-Frequency Representation (TFR) using Stockwell Transform.
Same computation as `~mne.time_frequency.tfr_array_stockwell`, but operates
on `~mne.Epochs` objects instead of :class:`NumPy arrays <numpy.ndarray>`.
See :footcite:`Stockwell2007,MoukademEtAl2014,WheatEtAl2010,JonesEtAl2006`
for more information.
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
fmin : None, float
The minimum frequency to include. If None defaults to the minimum fft
frequency greater than zero.
fmax : None, float
The maximum frequency to include. If None defaults to the maximum fft.
n_fft : int | None
The length of the windows used for FFT. If None, it defaults to the
next power of 2 larger than the signal length.
width : float
The width of the Gaussian window. If < 1, increased temporal
resolution, if > 1, increased frequency resolution. Defaults to 1.
(classical S-Transform).
decim : int
The decimation factor on the time axis. To reduce memory usage.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
n_jobs : int
The number of jobs to run in parallel (over channels).
%(verbose)s
Returns
-------
power : AverageTFR
The averaged power.
itc : AverageTFR
The intertrial coherence. Only returned if return_itc is True.
See Also
--------
mne.time_frequency.tfr_array_stockwell
mne.time_frequency.tfr_multitaper
mne.time_frequency.tfr_array_multitaper
mne.time_frequency.tfr_morlet
mne.time_frequency.tfr_array_morlet
Notes
-----
.. versionadded:: 0.9.0
References
----------
.. footbibliography::
"""
# verbose dec is used b/c subfunctions are verbose
data = _get_data(inst, return_itc)
picks = _pick_data_channels(inst.info)
info = pick_info(inst.info, picks)
data = data[:, picks, :]
n_jobs = check_n_jobs(n_jobs)
power, itc, freqs = tfr_array_stockwell(data, sfreq=info['sfreq'],
fmin=fmin, fmax=fmax, n_fft=n_fft,
width=width, decim=decim,
return_itc=return_itc,
n_jobs=n_jobs)
times = inst.times[::decim].copy()
nave = len(data)
out = AverageTFR(info, power, times, freqs, nave, method='stockwell-power')
if return_itc:
out = (out, AverageTFR(deepcopy(info), itc, times.copy(),
freqs.copy(), nave, method='stockwell-itc'))
return out
|
wmvanvliet/mne-python
|
mne/time_frequency/_stockwell.py
|
Python
|
bsd-3-clause
| 9,627
|
[
"Gaussian"
] |
4e88170cf41662924346ba324832cff4a4a0ea3533d6dfdbece22c8548328fc9
|
#!/usr/bin/env pythonfx
#
# This file is part of FIREwork
#
# FIREwork is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIREwork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FIREwork. If not, see <http://www.gnu.org/licenses/>.
#
# FIREwork Copyright (C) 2008 - 2011 Julien Bert
# ==== Filtering ============================
# ===========================================
'''
TODO
# Low pass filter (Butterworth)
def volume_lp_filter(vol, fc, order):
from kernel import kernel_matrix_lp_H
from numpy import zeros
zo, yo, xo = vol.shape
volf = volume_fft(vol)
z, y, x = volf.shape
H = zeros((z, y, x), 'float32')
kernel_matrix_lp_H(H, fc, order)
volf *= H
vol = volume_ifft(volf, xo)
vol = vol.astype('float32')
#profil = image_1D_slice(H, c, c, w, c)
#freq = range(0, wo // 2 + 1)
#freq = array(freq, 'float32')
#freq /= float(wo)
return vol #, profil, freq
'''
'''
# 3D Metz filter
def filter_3d_Metz(vol, N, sig):
from firekernel import kernel_3D_conv_wrap_cuda
from numpy import where
smax = max(vol.shape)
H = filter_build_3d_Metz(smax, N, sig)
Hpad = filter_pad_3d_cuda(H)
z, y, x = vol.shape
vol = volume_pack_cube(vol)
kernel_3D_conv_wrap_cuda(vol, Hpad)
id = where(vol < 0)
vol[id] = 0
vol = volume_unpack_cube(vol, z, y, x)
return vol
# 3D Gaussian filter
def filter_3d_Gaussian(vol, sig):
from kernel import kernel_3D_conv_wrap_cuda
smax = max(vol.shape)
H = filter_build_3d_Gaussian(smax, sig)
Hpad = filter_pad_3d_cuda(H)
z, y, x = vol.shape
vol = volume_pack_cube(vol)
kernel_3D_conv_wrap_cuda(vol, Hpad)
vol = volume_unpack_cube(vol, z, y, x)
return vol
# 3D Butterworth low pass filter
def filter_3d_Butterworth_lp(vol, order, fc):
from kernel import kernel_3D_conv_wrap_cuda
smax = max(vol.shape)
H = filter_build_3d_Butterworth_lp(smax, order, fc)
Hpad = filter_pad_3d_cuda(H)
z, y, x = vol.shape
vol = volume_pack_cube(vol)
kernel_3D_conv_wrap_cuda(vol, Hpad)
vol = volume_unpack_cube(vol, z, y, x)
return vol
# 3D tangent hyperbolic low pass filter
def filter_3d_tanh_lp(vol, a, fc):
from kernel import kernel_3D_conv_wrap_cuda
smax = max(vol.shape)
H = filter_build_3d_tanh_lp(smax, a, fc)
Hpad = filter_pad_3d_cuda(H)
z, y, x = vol.shape
vol = volume_pack_cube(vol)
kernel_3D_conv_wrap_cuda(vol, Hpad)
vol = volume_unpack_cube(vol, z, y, x)
return vol
'''
# 2d low pass filter (Butterworth)
def filter_2d_butterworth_lp(im, fc, order):
smax = max(im.shape)
H = filter_build_2d_butterworth_lp(smax, order, fc)
return image_ifft(image_fft(im) * H)
# 2d high pass filter (Butterworth)
def filter_2d_butterworth_hp(im, fc, order):
smax = max(im.shape)
H = filter_build_2d_butterworth_hp(smax, order, fc)
return image_ifft(image_fft(im) * H)
# 2d band pass filter (Butterworth)
def filter_2d_butterworth_bp(im, fl, fh, order):
smax = max(im.shape)
    H = filter_build_2d_butterworth_bp(smax, order, fl, fh)
return image_ifft(image_fft(im) * H)
# 2d Metz filter
def filter_2d_metz(im, N, sig):
smax = max(im.shape)
    H = filter_build_2d_metz(smax, N, sig)
return image_ifft(image_fft(im) * H)
# 2d hyperbolic tangent low pass filter
def filter_2d_tanh_lp(im, a, fc):
smax = max(im.shape)
H = filter_build_2d_tanh_lp(smax, a, fc)
return image_ifft(image_fft(im) * H)
# 2d Gaussian filter
def filter_2d_gaussian(im, sig):
smax = max(im.shape)
    H = filter_build_2d_gaussian(smax, sig)
return image_ifft(image_fft(im) * H)
# 2d Lanczos2 filter
def filter_2d_lanczos2(im):
smax = max(im.shape)
H = filter_build_2d_lanczos(smax, a=2)
return image_ifft(image_fft(im) * H)
# 2d Lanczos3 filter
def filter_2d_lanczos3(im):
smax = max(im.shape)
H = filter_build_2d_lanczos(smax, a=3)
return image_ifft(image_fft(im) * H)
# ==== Build filter =========================
# ===========================================
def filter_build_3d_metz(size, N, sig):
from numpy import zeros
from math import exp
c = size // 2
H = zeros((size, size, size), 'float32')
N += 1
for k in xrange(size):
for i in xrange(size):
for j in xrange(size):
fi = i - c
fj = j - c
fk = k - c
f = (fi*fi + fj*fj + fk*fk)**(0.5)
f /= size
gval = exp(-(f*f) / (2*sig*sig))
H[k, i, j] = (1 - (1 - gval*gval)**N) / gval
return H
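# The Metz filters in this file implement, in normalised frequency f,
#   H(f) = (1 - (1 - G(f)**2)**(N + 1)) / G(f),  with  G(f) = exp(-f**2 / (2*sig**2)),
# i.e. an inverse-Gaussian restoration at low frequencies that rolls off at
# high frequencies; larger N pushes the restoration further before roll-off.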
def filter_build_2d_metz(size, N, sig):
from numpy import zeros
from math import exp
c = size // 2
H = zeros((size, size), 'float32')
N += 1
for i in xrange(size):
for j in xrange(size):
fi = i - c
fj = j - c
f = (fi*fi + fj*fj)**(0.5)
f /= size
gval = exp(-(f*f) / (2*sig*sig))
H[i, j] = (1 - (1 - gval*gval)**N) / gval
return H
def filter_build_1d_metz(size, N, sig):
from numpy import zeros
from math import exp
c = size // 2
H = zeros((size), 'float32')
N += 1
for i in xrange(size):
f = abs((i - c) / float(size))
gval = exp(-(f*f) / (2*sig*sig))
H[i] = (1 - (1 - gval*gval)**N) / gval
return H
def filter_build_3d_gaussian(size, sig):
from numpy import zeros
from math import exp
c = size // 2
H = zeros((size, size, size), 'float32')
for k in xrange(size):
for i in xrange(size):
for j in xrange(size):
fi = i - c
fj = j - c
fk = k - c
f = (fi*fi + fj*fj + fk*fk)**(0.5)
f /= size
H[k, i, j] = exp(-(f*f) / (2*sig*sig))
return H
def filter_build_2d_gaussian(size, sig):
from numpy import zeros
from math import exp
c = size // 2
H = zeros((size, size), 'float32')
for i in xrange(size):
for j in xrange(size):
fi = i - c
fj = j - c
f = (fi*fi + fj*fj)**(0.5)
f /= size
H[i, j] = exp(-(f*f) / (2*sig*sig))
return H
def filter_build_1d_gaussian(size, sig):
from numpy import zeros
from math import exp
c = size // 2
H = zeros((size), 'float32')
for i in xrange(size):
f = abs((i - c) / float(size))
H[i] = exp(-(f*f) / (2*sig*sig))
return H
def filter_build_3d_butterworth_lp(size, order, fc):
from numpy import zeros
order *= 2
c = size // 2
H = zeros((size, size, size), 'float32')
for k in xrange(size):
for i in xrange(size):
for j in xrange(size):
f = ((i-c)*(i-c) + (j-c)*(j-c) + (k-c)*(k-c))**(0.5) # radius
f /= size # frequency
H[k, i, j] = 1 / (1 + (f / fc)**order)**0.5 # filter
return H
def filter_build_2d_butterworth_lp(size, order, fc):
from numpy import zeros
order *= 2
c = size // 2
H = zeros((size, size), 'float32')
for i in xrange(size):
for j in xrange(size):
f = ((i-c)*(i-c) + (j-c)*(j-c))**(0.5) # radius
f /= size # frequency
H[i, j] = 1 / (1 + (f / fc)**order)**0.5 # filter
return H
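# The low pass builders above produce the standard Butterworth magnitude
#   H(f) = 1 / sqrt(1 + (f / fc)**(2 * order)),
# where f is the radial frequency normalised by the filter size and fc the
# cutoff; `order` is doubled internally so callers pass the usual filter order.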
def filter_build_2d_butterworth_hp(size, order, fc):
from numpy import zeros
order *= 2
c = size // 2
H = zeros((size, size), 'float32')
for i in xrange(size):
for j in xrange(size):
r = ((i-c)*(i-c) + (j-c)*(j-c))**(0.5) # radius
            f = r / size # frequency
# build like a low pass filter with fc = 0.5 - fc and mirror f (0.5 - f)
H[i, j] = 1 / (1 + ((0.5-f) / (0.5-fc))**order)**0.5
return H
def filter_build_2d_butterworth_bp(size, order, fl, fh):
from numpy import zeros
order *= 2
c = size // 2
H = zeros((size, size), 'float32')
for i in xrange(size):
for j in xrange(size):
r = ((i-c)*(i-c) + (j-c)*(j-c))**(0.5) # radius
            f = r / size # frequency
# low pass filter
a1 = 1 / (1 + (f / fh)**order)**0.5
# high pass filter
a2 = 1 / (1 + ((0.5-f) / (0.5-fl))**order)**0.5
# band pass filter
H[i, j] = a1 * a2
return H
def filter_build_1d_butterworth_lp(size, order, fc):
from numpy import zeros
order *= 2
c = size // 2
H = zeros((size), 'float32')
for i in xrange(size):
f = abs((i-c) / float(size))
H[i] = 1 / (1 + (f / fc)**order)**0.5
return H
def filter_build_3d_tanh_lp(size, a, fc):
from numpy import zeros
from math import tanh, pi
c = size // 2
H = zeros((size, size, size), 'float32')
for k in xrange(size):
for i in xrange(size):
for j in xrange(size):
f = ((i-c)*(i-c) + (j-c)*(j-c) + (k-c)*(k-c))**(0.5) # radius
f /= size # frequency
v = (pi * (f - fc)) / (2 * a * fc)
H[k, i, j] = 0.5 - (0.5 * tanh(v)) # filter
return H
def filter_build_2d_tanh_lp(size, a, fc):
from numpy import zeros, array
from math import tanh, pi
c = size // 2
H = zeros((size, size), 'float32')
for i in xrange(size):
for j in xrange(size):
f = ((i-c)*(i-c) + (j-c)*(j-c))**(0.5) # radius
f /= size # frequency
v = (pi * (f - fc)) / (2 * a * fc)
H[i, j] = 0.5 - (0.5 * tanh(v)) # filter
return H
def filter_build_1d_tanh_lp(size, a, fc):
from numpy import zeros, array
from math import tanh, pi
c = size // 2
H = zeros((size), 'float32')
for i in xrange(size):
f = abs((i-c) / float(size))
v = (pi * (f - fc)) / (2 * a * fc)
H[i] = 0.5 - (0.5 * tanh(v)) # filter
return H
def filter_build_3d_tanh_hp(size, a, fc):
H = filter_build_3d_tanh_lp(size, a, fc)
H = 1 - H
return H
def filter_build_2d_tanh_hp(size, a, fc):
H = filter_build_2d_tanh_lp(size, a, fc)
H = 1 - H
return H
def filter_build_1d_tanh_hp(size, a, fc):
	H = filter_build_1d_tanh_lp(size, a, fc)
H = 1 - H
return H
def filter_build_2d_lanczos(size, a=2):
from numpy import zeros, sinc
a = float(a)
c = size // 2
H = zeros((size, size), 'float32')
p = a / 0.5
for i in xrange(size):
for j in xrange(size):
fi = i - c
fj = j - c
f = (fi*fi + fj*fj)**(0.5)
f /= size
f *= p
H[i, j] = sinc(f)*sinc(f / a)
return H
def filter_build_1d_lanczos(size, a=2):
from numpy import zeros, sinc
a = float(a)
c = size // 2
H = zeros((size), 'float32')
p = a / 0.5
for i in xrange(size):
f = p * abs((i-c) / float(size))
H[i] = sinc(f)*sinc(f / a)
return H
'''
def filter_pad_3d_cuda(H):
from numpy import zeros
size, size, size = H.shape
c = size // 2
nc = (size // 2) + 1
Hpad = zeros((size, size, size), 'float32')
for k in xrange(size):
for i in xrange(size):
for j in xrange(size):
padi = i - c
padj = j - c
padk = k - c
if padi < 0: padi = size + padi
if padj < 0: padj = size + padj
if padk < 0: padk = size + padk
Hpad[padk, padi, padj] = H[k, i, j]
return Hpad[:, :, :nc]
'''
# Return the radial profile of any filter
def filter_profil(H):
from numpy import arange
dim = len(H.shape)
if dim == 3:
nz, ny, nx = H.shape
cz = nz // 2
im = H[cz, :, :]
cx = nx // 2
cy = ny // 2
p = im[cy, cx:]
elif dim == 2:
ny, nx = H.shape
cx = nx // 2
cy = ny // 2
p = H[cy, cx:]
f = arange(len(p), dtype='float32') / float(nx)
return p, f
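# Example usage (a sketch, not part of the original module): build a 2D
# Gaussian low-pass filter and look at its radial profile.
if __name__ == '__main__':
	H = filter_build_2d_gaussian(64, 0.1)
	p, f = filter_profil(H)
	print 'profile samples:', p[:5]
	print 'frequencies:', f[:5]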
|
hksonngan/astir.firework
|
utils/filter.py
|
Python
|
gpl-3.0
| 13,249
|
[
"Gaussian"
] |
1ab2cd640096c7e8bb44d607b5024a31bb8f436a807549f39d46dbee1e3a924f
|
#!/usr/bin/env python2
import os.path
import subprocess
import sys
import urllib
KEY_FILE = "submit.token"
def main(filename):
# Prompt for key if missing
if not os.path.exists(KEY_FILE):
print "Please visit http://css.csail.mit.edu/6.858/2015/labs/handin.html"
print "and enter your API key."
key = raw_input("Key: ").strip()
with open(KEY_FILE, "w") as f:
f.write(key + "\n")
print "API key written to %s" % KEY_FILE
# Read the key.
with open(KEY_FILE) as f:
key = f.read().strip()
# Shell out to curl. urllib2 doesn't deal with multipart attachments. Throw
# away the output; you just get a random HTML page.
with open("/dev/null", "a") as null:
subprocess.check_call(["curl", "-f",
"-F", "file=@%s" % filename,
"-F", "key=%s" % key,
"https://6858.csail.mit.edu/upload"],
stdout=null, stderr=null)
print "Submitted %s." % filename
print "Please visit http://css.csail.mit.edu/6.858/2015/labs/handin.html"
print "to verify the upload."
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Usage: %s TARBALL" % sys.argv[0]
sys.exit(1)
main(sys.argv[1])
|
rychipman/858-labs
|
submit.py
|
Python
|
mit
| 1,317
|
[
"VisIt"
] |
0018111be1e24044bffe32e80ec751d41128c8e77ad5488fcdb8847ea3541680
|
#!/usr/bin/env python
# test --create
command += oiiotool ("--create 320x240 3 -d uint8 -o black.tif")
command += oiiotool ("--stats black.tif")
# test --pattern constant
command += oiiotool ("--pattern constant:color=.1,.2,.3,1 320x240 4 -o constant.tif")
command += oiiotool ("--stats constant.tif")
# test --pattern noise
command += oiiotool ("--pattern noise:type=uniform:min=0.25:max=0.75 64x64 3 -d uint8 -o noise-uniform3.tif")
command += oiiotool ("--pattern noise:type=gaussian:mean=0.5:stddev=0.1 64x64 3 -d uint8 -o noise-gauss.tif")
command += oiiotool ("--pattern noise:type=salt:portion=0.01:value=1 64x64 3 -d uint8 -o noise-salt.tif")
# test --pattern fill
command += oiiotool ("--pattern fill:color=0,0,0.5 64x64 3 -d uint8 -o pattern-const.tif")
command += oiiotool ("--pattern fill:top=0.1,0.1,0.1:bottom=0,0,0.5 64x64 3 -d uint8 -o pattern-gradientv.tif")
command += oiiotool ("--pattern fill:left=0.1,0.1,0.1:right=0,0.5,0 64x64 3 -d uint8 -o pattern-gradienth.tif")
command += oiiotool ("--pattern fill:topleft=0.1,0.1,0.1:topright=0,0.5,0:bottomleft=0.5,0,0:bottomright=0,0,0.5 64x64 3 -d uint8 -o pattern-gradient4.tif")
# test --fill
command += oiiotool ("--create 256x256 3 --fill:color=1,.5,.5 256x256 --fill:color=0,1,0 80x80+100+100 -d uint8 -o filled.tif")
command += oiiotool ("--create 64x64 3 --fill:top=0.1,0.1,0.1:bottom=0,0,0.5 64x64 -d uint8 -o fillv.tif")
command += oiiotool ("--create 64x64 3 --fill:left=0.1,0.1,0.1:right=0,0.5,0 64x64 -d uint8 -o fillh.tif")
command += oiiotool ("--create 64x64 3 --fill:topleft=0.1,0.1,0.1:topright=0,0.5,0:bottomleft=0.5,0,0:bottomright=0,0,0.5 64x64 -d uint8 -o fill4.tif")
# test --line
command += oiiotool ("--pattern checker:color1=.1,.1,.1:color2=0,0,0 256x256 3 " +
"-line:color=0.25,0,0,0.25 10,60,250,20 " +
"-line:color=0.5,0,0,0.5 10,62,250,100 " +
"-line:color=1,0,0,1 10,64,250,400 " +
"-line:color=0,1,0,1 250,100,10,184 " +
"-line:color=0,0.5,0,0.5 250,200,10,182 " +
"-line:color=0,0.25,0,0.25 100,400,10,180 " +
"-line:color=.5,.5,0,0.5 100,100,120,100,120,100,120,120,120,120,100,120,100,120,100,100 " +
"-box:color=0,0.5,0.5,0.5 150,100,240,180 " +
"-d uint8 -o lines.tif")
# test --box
command += oiiotool ("--pattern checker:color1=.1,.1,.1:color2=0,0,0 256x256 3 " +
"--box:color=0,1,1,1 150,100,240,180 " +
"--box:color=0.5,0.5,0,0.5:fill=1 100,50,180,140 " +
"-d uint8 -o box.tif")
# test --point
command += oiiotool ("--create 64x64 3 " +
"--point:color=0,1,1,1 50,10 " +
"--point:color=1,0,1,1 20,20,30,30,40,40 " +
"-d uint8 -o points.tif")
# To add more tests, just append more lines like the above and also add
# the new 'feature.tif' (or whatever you call it) to the outputs list,
# below.
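# For instance (a hypothetical "mycheck.tif" test, sketched here only as a
# pattern to copy; the oiiotool arguments mirror the --pattern tests above):
#   command += oiiotool ("--pattern checker:color1=1,0,0:color2=0,0,1 64x64 3 -d uint8 -o mycheck.tif")
# ...and then add "mycheck.tif" to the outputs list below.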
# Outputs to check against references
outputs = [ "pattern-const.tif", "pattern-gradienth.tif",
"pattern-gradientv.tif", "pattern-gradient4.tif",
"noise-uniform3.tif", "noise-gauss.tif", "noise-salt.tif",
"filled.tif", "fillh.tif", "fillv.tif", "fill4.tif",
"lines.tif", "box.tif", "points.tif",
"out.txt" ]
#print "Running this command:\n" + command + "\n"
|
OpenImageIO/oiio
|
testsuite/oiiotool-pattern/run.py
|
Python
|
bsd-3-clause
| 3,464
|
[
"Gaussian"
] |
925daec2b1d2cc6d6865efde7bae479518f8453fbf998ca9db15ce98a0974c32
|
# coding: utf-8
# Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
R"""Simulate rounded, faceted shapes in molecular dynamics.
The DEM component provides forces which apply short-range, purely
repulsive interactions between contact points of two shapes. The
resulting interaction is consistent with expanding the given polygon
or polyhedron by a disk or sphere of a particular rounding radius.
The pair forces located in :py:mod:`hoomd.dem.pair` behave like other
hoomd pair forces, computing forces and torques for each particle
based on its interactions with its neighbors. Also included are
geometric helper utilities in :py:mod:`hoomd.dem.utils`.
Initialization
--------------
When initializing systems, be sure to set the inertia tensor of DEM
particles. Axes with an inertia tensor of 0 (the default) will not
have their rotational degrees of freedom integrated. Because only the
three principal components of inertia are given to hoomd, particle
vertices should also be specified in the principal reference frame so
that the inertia tensor is diagonal.
Example::
snap = hoomd.data.make_snapshot(512, box=hoomd.data.boxdim(L=10))
snap.particles.moment_inertia[:] = (10, 10, 10)
system = hoomd.init.read_snapshot(snap)
Integration
-----------
To allow particles to rotate, use integrators which can update
rotational degrees of freedom:
* `hoomd.md.methods.NVE`
* `hoomd.md.methods.NVT`
* `hoomd.md.methods.NPT`
* `hoomd.md.methods.Langevin`
* `hoomd.md.methods.Brownian`
Note that the Nosé-Hoover thermostats used in
`hoomd.md.methods.NVT` and `hoomd.md.methods.NPT`
work by rescaling momenta and angular momenta. This can lead to
instabilities in the start of the simulation if particles are
initialized with 0 angular momentum and no neighbor interactions. Two
easy fixes for this problem are to initialize each particle with some
angular momentum or to first run for a few steps with
`hoomd.md.methods.Langevin` or
`hoomd.md.methods.Brownian`.
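For example, attaching a Langevin integrator with the HOOMD 2.x syntax used
in the snippet above (a sketch only; the ``dt``, ``kT``, and ``seed`` values
are illustrative, not prescribed by this module)::

    all_group = hoomd.group.all()
    hoomd.md.integrate.mode_standard(dt=0.005)
    hoomd.md.integrate.langevin(group=all_group, kT=1.0, seed=42)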
Data Storage
------------
To store trajectories of DEM systems, use a format that knows about
anisotropic particles, such as:
* :py:class:`hoomd.dump.getar`
* :py:class:`hoomd.dump.GSD`
.. rubric:: Stability
:py:mod:`hoomd.dem` is **stable**. When upgrading from version 2.x to 2.y (y > x),
existing job scripts that follow *documented* interfaces for functions and classes
will not require any modifications. **Maintainer:** Matthew Spellings.
"""
# this file exists to mark this directory as a python module
# need to import all submodules defined in this directory
from hoomd.dem import pair
from hoomd.dem import params
from hoomd.dem import utils
|
joaander/hoomd-blue
|
hoomd/dem/__init__.py
|
Python
|
bsd-3-clause
| 2,758
|
[
"HOOMD-blue"
] |
5ad9b68e90c3b78546c96e3bb55613b5ad453974d162c68a75f9e52b2f71b16b
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# infile_maker.py
#
# Copyright 2016 Carlos Eduardo Sequeiros Borja <casebor@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
"""
Module useful only for the creation of input files for the Amber software.
This module can create leap, minimization and molecular dynamics input files.
"""
global mdin, remdin, minin
mdin = """# Input md file generated automatically
&cntrl
imin=0,
ntx= irest= ntrx=1,
ntxo=1, ntpr=500, ntave=0, ntwr=10000,
iwrap=0, ntwx=500, ntwv=0, ioutfm=1, ntwe=0,
ibelly= ntr= ntwprt=
nstlim= t=0.0, dt=0.002, nrespa=1,
ntt=2, tempi= ig= tautp=5.0,
gamma_ln=0, vlimit=20.0,
ntp=0, pres0=1.0, comp=44.6, taup=1.0,
ntc=2, tol=0.00001, jfastw=0,
ntf=2, ntb=0, dielc=1.0, cut=999.0, nsnb=10,
igb=1, intdiel=1.0, extdiel=78.5, saltcon=0.0, rgbmax=10.0,
rbornstat=0, offset=0.09, gbsa=1, surften=0.005,
nmropt=
/
"""
remdin = """# Input remd file generated automatically
&cntrl
imin=0,
ntx=5, irest=1, ntrx=1,
ntxo=1, ntpr=500, ntave=0, ntwr=10000,
iwrap=0, ntwx=500, ntwv=0, ioutfm=1, ntwe=0,
ibelly= ntr= ntwprt=
nstlim= t=0.0, dt=0.002, nrespa=1,
ntt=2, temp0= ig= tautp=5.0,
gamma_ln=0, vlimit=20.0,
ntp=0, pres0=1.0, comp=44.6, taup=1.0,
ntc=2, tol=0.00001, jfastw=0,
ntf=2, ntb=0, dielc=1.0, cut=999.0, nsnb=10,
igb=1, intdiel=1.0, extdiel=78.5, saltcon=0.0, rgbmax=10.0,
rbornstat=0, offset=0.09, gbsa=1, surften=0.005,
nmropt=1,
/
&wt TYPE='END'
/
DISANG=
"""
minin = """# Input minimization file generated automatically
&cntrl
imin=1, ntx=1, irest=0, ntpr=100, ntwr=250, ntwprt=0,
ibelly=1, ntr=0,
maxcyc=1000, ncyc=100, ntmin=1, dx0=0.01, drms=0.0001,
ntc=1, ntf=1, ntb=0, cut=15, igb=1, saltcon=0.0, rgbmax=10.0,
gbsa=1, surften=0.005, rdt=0.00
&end
"""
def make_mdin(ntx, irest, temp, steps, restr_m, belly_m, ig, nmr='0', disang=None, ntwprt=None):
"""Returns a string containing the input parameters for an Amber molecular
simulation.
All parameters are strings.
######################################################################################
# #
# This function makes the inputs for the Molecular Dynamics in a String format, #
# where ntx, irest and nscm are the same variables that you use in Amber, temp is #
# the temperature (in the first step of the MD you will raise the temperature from #
    # 10.0 to the temperature specified, steps refers to nstlim in Amber, and this       #
# steps should be the same for every input file for the MD. restrM and bellyM are #
    # the restraint and restriction masks for Amber respectively.                        #
# All the parameters are treated as Strings. #
# #
######################################################################################
"""
lines = mdin.split('\n')
i = 0
while i<len(lines):
if 'ntx=' in lines[i]:
parms = lines[i].split()
j = 0
while j<len(parms):
if 'ntx' in parms[j]:
parms[j] += ntx + ','
elif 'irest' in parms[j]:
parms[j] += irest + ','
j += 1
lines[i] = ' ' + ' '.join(parms)
elif 'ibelly' in lines[i]:
parms = lines[i].split()
j = 0
while j<len(parms):
if 'ibelly' in parms[j]:
if len(belly_m)>0:
parms[j] += '1, bellymask=' + belly_m + ','
else:
parms[j] += '0,'
elif 'ntr' in parms[j]:
if len(restr_m)>0:
parms[j] += '1, restraint_wt=1.0, restraintmask=' + restr_m + ','
else:
parms[j] += '0,'
elif 'ntwprt' in parms[j]:
if ntwprt is not None:
parms[j] += ntwprt + ','
else:
parms[j] += ''
j += 1
lines[i] = ' ' + ' '.join(parms)
elif 'nstlim' in lines[i]:
parms = lines[i].split()
j = 0
while j<len(parms):
if 'nstlim' in parms[j]:
parms[j] += steps + ','
j += 1
lines[i] = ' ' + ' '.join(parms)
elif 'tempi' in lines[i]:
parms = lines[i].split()
j = 0
while j<len(parms):
if 'tempi' in parms[j]:
if ntx == '1':
parms[j] += '10.0, temp0=' + temp + ','
else:
parms[j] += temp + ', temp0=' + temp + ','
if 'ig=' in parms[j]:
parms[j] += ig + ','
j += 1
lines[i] = ' ' + ' '.join(parms)
elif 'nmropt' in lines[i]:
parms = lines[i].split()
j = 0
while j<len(parms):
if 'nmropt' in parms[j]:
parms[j] += nmr + ','
j += 1
lines[i] = ' ' + ' '.join(parms)
i += 1
md_in = '\n'.join(lines)
    if nmr != '0':
        if disang is not None:
            md_in += '&wt TYPE=\'END\'\n /\nDISANG=' + disang
        else:
            print('Error, you did not supply a valid chirality restriction file')
            quit()
return md_in
def make_remdin(temp, steps, restr_m, belly_m, disang, ig, n_exchg='0', ntwprt=None):
"""Returns a string containing the input parameters for an Amber REMD
molecular simulation.
All parameters are strings.
######################################################################################
# #
# This function makes the inputs for the Molecular Dynamics in a String format, #
# where temp is the temperature, in the first step of the MD you will raise the #
    # temperature from 0.0 to the temperature specified, steps refers to nstlim in       #
# Amber, and this steps should be the same for every input file for the REMD, #
# restrM and bellyM are the restraint and restriction masks for Amber #
    # respectively.                                                                       #
# All the parameters are treated as Strings. #
# #
######################################################################################
"""
lines = remdin.split('\n')
i = 0
while i<len(lines):
if 'ibelly=' in lines[i]:
parms = lines[i].split()
j = 0
while j<len(parms):
if 'ibelly' in parms[j]:
if len(belly_m)>0:
parms[j] += '1, bellymask=' + belly_m + ','
else:
parms[j] += '0,'
elif 'ntr' in parms[j]:
if len(restr_m)>0:
parms[j] += '1, restraint_wt=1.0, restraintmask=' + restr_m + ','
else:
parms[j] += '0,'
elif 'ntwprt' in parms[j]:
if ntwprt is not None:
parms[j] += ntwprt + ','
else:
parms[j] += ''
j += 1
lines[i] = ' ' + ' '.join(parms)
elif 'nstlim' in lines[i]:
parms = lines[i].split()
j = 0
while j<len(parms):
if 'nstlim' in parms[j]:
parms[j] += steps + ','
j += 1
lines[i] = ' ' + ' '.join(parms)
elif 'temp0' in lines[i]:
parms = lines[i].split()
j = 0
while j<len(parms):
if 'temp0' in parms[j]:
parms[j] += temp + ','
if n_exchg != '0':
parms[j] += ' numexchg=' + n_exchg + ','
if 'ig=' in parms[j]:
parms[j] += ig + ','
j += 1
lines[i] = ' ' + ' '.join(parms)
elif 'DISANG' in lines[i]:
lines[i] += disang
i += 1
remd_in = '\n'.join(lines)
return remd_in
def make_minin(aa_num):
"""Returns a string with the parameters for the minimization step of the
molecular simulation in Amber.
Parameter aa_num is an integer.
######################################################################################
# #
# This makes the input file for the minimization step, if the parameters for the #
# minimization are going to be changed, you must do it in the minin string. #
# #
######################################################################################
"""
# Separate the lines of the minimization pattern
lines = minin.split('\n')
i = 0
while i<len(lines):
if 'ibelly' in lines[i]:
# Create and add the mask
lines[i] += ' bellymask=":1-%s,"' %(aa_num)
i += 1
# Recreate the minimization pattern
min_in = '\n'.join(lines)
return min_in
def make_leap(input_file, funnel):
"""Return a string containing all the orders for the tleap module of the Amber software to
    perform the REMD. Needs as parameters the pdb file names of the protein and the funnel.
Both parameters are strings consisting only of file names.
######################################################################################
# #
# This makes the input file for tleap. For this step you must have the #
# pdb of the protein and the funnel in the same place this script is. #
# #
######################################################################################
"""
leap_in = 'source leaprc.ff14SB_Ar\n'
leap_in += 'prot = loadPdb %s\n' %(input_file)
leap_in += 'fun = loadPdb %s\n' %(funnel)
#leap_in += 'seq = { LEU LYS ASN ALA LYS GLU ASP ALA ILE ALA GLU LEU LYS LYS ALA GLY ILE THR SER ASP PHE TYR PHE ASN ALA ILE ASN LYS ALA LYS THR VAL GLU GLU VAL ASN ALA LEU LYS ASN GLU ILE LEU LYS ALA }\n'
#leap_in += 'prot = loadPdbUsingSeq %s seq\n' %(input_file)
leap_in += 'fun = loadPdb %s\n' %(funnel)
leap_in += 'complex = combine {prot fun}\n'
leap_in += 'saveAmberParm prot %s.prmtop %s.inpcrd\n' %(input_file[:-4], input_file[:-4])
leap_in += 'saveAmberParm complex %s-%s.prmtop %s-%s.inpcrd\n' %(input_file[:-4], funnel[:-4],
input_file[:-4], funnel[:-4])
leap_in += 'savePdb prot %s.pdb\n' %(input_file[:-4])
leap_in += 'savePdb complex %s-%s.pdb\n' %(input_file[:-4], funnel[:-4])
leap_in += 'quit\n'
return leap_in
def make_grp_file(name, parmtop, num_files, cycle):
"""Returns a string with the commands of a REMD group file for Amber.
Parameters:
name: Pattern for input, output, coordinates, trajectory, restart,
info and reference files. Type string.
parmtop: Topology parameter file. Type string.
num_files: Number of files to add at the group file. Type integer.
cycle: Flag for the equilibration step. Type integer.
######################################################################################
# #
# For every replicate a command is made sequentially. #
# #
######################################################################################
"""
grp = ''
if cycle == 0:
for i in range(num_files):
grp += '-O -i %s-000-mdin-rep.%3s -o %s-000-mdout-rep.%3s -c min.rst -p %s.prmtop -r %s-000-rep-%3s.rst -x %s-000-nc-rep.%3s -inf %s-000-mdinfo-rep.%3s -ref min.rst\n'\
%(name, str(i).rjust(3, '0'), name, str(i).rjust(3, '0'), parmtop, name, str(i).rjust(3, '0'), name, str(i).rjust(3, '0'), name, str(i).rjust(3, '0'))
else:
for i in range(num_files):
grp += '-O -i %s-%3s-mdin-rep.%3s -o %s-%3s-mdout-rep.%3s -c %s-%3s-rep-%3s.rst -p %s.prmtop -r %s-%3s-rep-%3s.rst -x %s-%3s-nc-rep.%3s -inf %s-%3s-mdinfo-rep.%3s -ref %s-%3s-rep-%3s.rst\n'\
%(name, str(cycle).rjust(3, '0'), str(i).rjust(3, '0'), name, str(cycle).rjust(3, '0'), str(i).rjust(3, '0'), name, str(cycle-1).rjust(3, '0'), str(i).rjust(3, '0'), parmtop, name, str(cycle).rjust(3, '0'), str(i).rjust(3, '0'), name, str(cycle).rjust(3, '0'), str(i).rjust(3, '0'), name, str(cycle).rjust(3, '0'), str(i).rjust(3, '0'), name, str(cycle-1).rjust(3, '0'), str(i).rjust(3, '0'))
return grp
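# Example usage (an illustrative sketch, not part of the original module; the
# residue count and output file name below are hypothetical):
if __name__ == '__main__':
    min_input = make_minin(45)
    with open('min.in', 'w') as handle:
        handle.write(min_input)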
|
casebor/labioscripts
|
python/infile_maker.py
|
Python
|
gpl-3.0
| 12,974
|
[
"Amber"
] |
8875e2ad3b1e15abeb7633bfd510ce78a07b552e0b0d917eab5d2cec5892fcf4
|
import unittest
import numpy as np
import pysal
from pysal.spreg.twosls import TSLS, BaseTSLS
from scipy import sparse as SP
class TestBaseTSLS(unittest.TestCase):
def setUp(self):
db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')
self.y = np.array(db.by_col("CRIME"))
self.y = np.reshape(self.y, (49,1))
self.X = []
self.X.append(db.by_col("INC"))
self.X = np.array(self.X).T
self.X = np.hstack((np.ones(self.y.shape),self.X))
self.X = SP.csr_matrix(self.X)
self.yd = []
self.yd.append(db.by_col("HOVAL"))
self.yd = np.array(self.yd).T
self.q = []
self.q.append(db.by_col("DISCBD"))
self.q = np.array(self.q).T
def test_basic(self):
reg = BaseTSLS(self.y, self.X, self.yd, self.q)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_array_almost_equal(reg.betas, betas, 7)
h_0 = np.array([ 1. , 19.531, 5.03 ])
np.testing.assert_array_almost_equal(reg.h.toarray()[0], h_0)
hth = np.array([[ 49. , 704.371999 , 139.75 ],
[ 704.371999 , 11686.67338121, 2246.12800625],
[ 139.75 , 2246.12800625, 498.5851 ]])
np.testing.assert_array_almost_equal(reg.hth, hth, 7)
hthi = np.array([[ 0.1597275 , -0.00762011, -0.01044191],
[-0.00762011, 0.00100135, -0.0023752 ],
[-0.01044191, -0.0023752 , 0.01563276]])
np.testing.assert_array_almost_equal(reg.hthi, hthi, 7)
self.assertEqual(reg.k, 3)
self.assertEqual(reg.kstar, 1)
self.assertAlmostEqual(reg.mean_y, 35.128823897959187, 7)
self.assertEqual(reg.n, 49)
pfora1a2 = np.array([[ 9.58156106, -0.22744226, -0.13820537],
[ 0.02580142, 0.08226331, -0.03143731],
[-3.13896453, -0.33487872, 0.20690965]])
np.testing.assert_array_almost_equal(reg.pfora1a2, pfora1a2, 7)
predy_5 = np.array([[-28.68949467], [ 28.99484984], [ 55.07344824], [ 38.26609504], [ 57.57145851]])
np.testing.assert_array_almost_equal(reg.predy[0:5], predy_5, 7)
q_5 = np.array([[ 5.03], [ 4.27], [ 3.89], [ 3.7 ], [ 2.83]])
np.testing.assert_array_equal(reg.q[0:5], q_5)
self.assertAlmostEqual(reg.sig2n_k, 587.56797852699822, 7)
self.assertAlmostEqual(reg.sig2n, 551.5944288212637, 7)
self.assertAlmostEqual(reg.sig2, 551.5944288212637, 7)
self.assertAlmostEqual(reg.std_y, 16.732092091229699, 7)
u_5 = np.array([[ 44.41547467], [-10.19309584], [-24.44666724], [ -5.87833504], [ -6.83994851]])
np.testing.assert_array_almost_equal(reg.u[0:5], u_5, 7)
self.assertAlmostEqual(reg.utu, 27028.127012241919, 7)
varb = np.array([[ 0.41526237, 0.01879906, -0.01730372],
[ 0.01879906, 0.00362823, -0.00184604],
[-0.01730372, -0.00184604, 0.0011406 ]])
np.testing.assert_array_almost_equal(reg.varb, varb, 7)
vm = np.array([[ 229.05640809, 10.36945783, -9.54463414],
[ 10.36945783, 2.0013142 , -1.01826408],
[ -9.54463414, -1.01826408, 0.62914915]])
np.testing.assert_array_almost_equal(reg.vm, vm, 7)
x_0 = np.array([ 1. , 19.531])
np.testing.assert_array_almost_equal(reg.x.toarray()[0], x_0, 7)
y_5 = np.array([[ 15.72598 ], [ 18.801754], [ 30.626781], [ 32.38776 ], [ 50.73151 ]])
np.testing.assert_array_almost_equal(reg.y[0:5], y_5, 7)
yend_5 = np.array([[ 80.467003], [ 44.567001], [ 26.35 ], [ 33.200001], [ 23.225 ]])
np.testing.assert_array_almost_equal(reg.yend[0:5], yend_5, 7)
z_0 = np.array([ 1. , 19.531 , 80.467003])
np.testing.assert_array_almost_equal(reg.z.toarray()[0], z_0, 7)
zthhthi = np.array([[ 1.00000000e+00, -1.66533454e-16, 4.44089210e-16],
[ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00],
[ 1.26978671e+01, 1.05598709e+00, 3.70212359e+00]])
np.testing.assert_array_almost_equal(reg.zthhthi, zthhthi, 7)
def test_n_k(self):
reg = BaseTSLS(self.y, self.X, self.yd, self.q, sig2n_k=True)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_array_almost_equal(reg.betas, betas, 7)
vm = np.array([[ 243.99486949, 11.04572682, -10.16711028],
[ 11.04572682, 2.13183469, -1.08467261],
[ -10.16711028, -1.08467261, 0.67018062]])
np.testing.assert_array_almost_equal(reg.vm, vm, 7)
def test_white(self):
reg = BaseTSLS(self.y, self.X, self.yd, self.q, robust='white')
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_array_almost_equal(reg.betas, betas, 7)
vm = np.array([[ 208.27139316, 15.6687805 , -11.53686154],
[ 15.6687805 , 2.26882747, -1.30312033],
[ -11.53686154, -1.30312033, 0.81940656]])
np.testing.assert_array_almost_equal(reg.vm, vm, 7)
def test_hac(self):
gwk = pysal.kernelW_from_shapefile(pysal.examples.get_path('columbus.shp'),k=15,function='triangular', fixed=False)
reg = BaseTSLS(self.y, self.X, self.yd, self.q, robust='hac', gwk=gwk)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_array_almost_equal(reg.betas, betas, 7)
vm = np.array([[ 231.07254978, 15.42050291, -11.3941033 ],
[ 15.01376346, 1.92422887, -1.11865505],
[ -11.34381641, -1.1279227 , 0.72053806]])
np.testing.assert_array_almost_equal(reg.vm, vm, 7)
class TestTSLS(unittest.TestCase):
def setUp(self):
db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')
self.y = np.array(db.by_col("CRIME"))
self.y = np.reshape(self.y, (49,1))
self.X = []
self.X.append(db.by_col("INC"))
self.X = np.array(self.X).T
self.X = SP.csr_matrix(self.X)
self.yd = []
self.yd.append(db.by_col("HOVAL"))
self.yd = np.array(self.yd).T
self.q = []
self.q.append(db.by_col("DISCBD"))
self.q = np.array(self.q).T
def test_basic(self):
reg = TSLS(self.y, self.X, self.yd, self.q)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_array_almost_equal(reg.betas, betas, 7)
h_0 = np.array([ 1. , 19.531, 5.03 ])
np.testing.assert_array_almost_equal(reg.h.toarray()[0], h_0)
hth = np.array([[ 49. , 704.371999 , 139.75 ],
[ 704.371999 , 11686.67338121, 2246.12800625],
[ 139.75 , 2246.12800625, 498.5851 ]])
np.testing.assert_array_almost_equal(reg.hth, hth, 7)
hthi = np.array([[ 0.1597275 , -0.00762011, -0.01044191],
[-0.00762011, 0.00100135, -0.0023752 ],
[-0.01044191, -0.0023752 , 0.01563276]])
np.testing.assert_array_almost_equal(reg.hthi, hthi, 7)
self.assertEqual(reg.k, 3)
self.assertEqual(reg.kstar, 1)
self.assertAlmostEqual(reg.mean_y, 35.128823897959187, 7)
self.assertEqual(reg.n, 49)
pfora1a2 = np.array([[ 9.58156106, -0.22744226, -0.13820537],
[ 0.02580142, 0.08226331, -0.03143731],
[-3.13896453, -0.33487872, 0.20690965]])
np.testing.assert_array_almost_equal(reg.pfora1a2, pfora1a2, 7)
predy_5 = np.array([[-28.68949467], [ 28.99484984], [ 55.07344824], [ 38.26609504], [ 57.57145851]])
np.testing.assert_array_almost_equal(reg.predy[0:5], predy_5, 7)
q_5 = np.array([[ 5.03], [ 4.27], [ 3.89], [ 3.7 ], [ 2.83]])
np.testing.assert_array_equal(reg.q[0:5], q_5)
self.assertAlmostEqual(reg.sig2n_k, 587.56797852699822, 7)
self.assertAlmostEqual(reg.sig2n, 551.5944288212637, 7)
self.assertAlmostEqual(reg.sig2, 551.5944288212637, 7)
self.assertAlmostEqual(reg.std_y, 16.732092091229699, 7)
u_5 = np.array([[ 44.41547467], [-10.19309584], [-24.44666724], [ -5.87833504], [ -6.83994851]])
np.testing.assert_array_almost_equal(reg.u[0:5], u_5, 7)
self.assertAlmostEqual(reg.utu, 27028.127012241919, 7)
varb = np.array([[ 0.41526237, 0.01879906, -0.01730372],
[ 0.01879906, 0.00362823, -0.00184604],
[-0.01730372, -0.00184604, 0.0011406 ]])
np.testing.assert_array_almost_equal(reg.varb, varb, 7)
vm = np.array([[ 229.05640809, 10.36945783, -9.54463414],
[ 10.36945783, 2.0013142 , -1.01826408],
[ -9.54463414, -1.01826408, 0.62914915]])
np.testing.assert_array_almost_equal(reg.vm, vm, 7)
x_0 = np.array([ 1. , 19.531])
np.testing.assert_array_almost_equal(reg.x.toarray()[0], x_0, 7)
y_5 = np.array([[ 15.72598 ], [ 18.801754], [ 30.626781], [ 32.38776 ], [ 50.73151 ]])
np.testing.assert_array_almost_equal(reg.y[0:5], y_5, 7)
yend_5 = np.array([[ 80.467003], [ 44.567001], [ 26.35 ], [ 33.200001], [ 23.225 ]])
np.testing.assert_array_almost_equal(reg.yend[0:5], yend_5, 7)
z_0 = np.array([ 1. , 19.531 , 80.467003])
np.testing.assert_array_almost_equal(reg.z.toarray()[0], z_0, 7)
zthhthi = np.array([[ 1.00000000e+00, -1.66533454e-16, 4.44089210e-16],
[ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00],
[ 1.26978671e+01, 1.05598709e+00, 3.70212359e+00]])
np.testing.assert_array_almost_equal(reg.zthhthi, zthhthi, 7)
self.assertAlmostEqual(reg.pr2, 0.27936137128173893, 7)
z_stat = np.array([[ 5.84526447e+00, 5.05764078e-09],
[ 3.67601567e-01, 7.13170346e-01],
[ -1.99468913e+00, 4.60767956e-02]])
np.testing.assert_array_almost_equal(reg.z_stat, z_stat, 7)
title = 'TWO STAGE LEAST SQUARES'
self.assertEqual(reg.title, title)
def test_n_k(self):
reg = TSLS(self.y, self.X, self.yd, self.q, sig2n_k=True)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_array_almost_equal(reg.betas, betas, 7)
vm = np.array([[ 243.99486949, 11.04572682, -10.16711028],
[ 11.04572682, 2.13183469, -1.08467261],
[ -10.16711028, -1.08467261, 0.67018062]])
np.testing.assert_array_almost_equal(reg.vm, vm, 7)
def test_white(self):
reg = TSLS(self.y, self.X, self.yd, self.q, robust='white')
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_array_almost_equal(reg.betas, betas, 7)
vm = np.array([[ 208.27139316, 15.6687805 , -11.53686154],
[ 15.6687805 , 2.26882747, -1.30312033],
[ -11.53686154, -1.30312033, 0.81940656]])
np.testing.assert_array_almost_equal(reg.vm, vm, 7)
self.assertEqual(reg.robust, 'white')
def test_hac(self):
gwk = pysal.kernelW_from_shapefile(pysal.examples.get_path('columbus.shp'),k=5,function='triangular', fixed=False)
reg = TSLS(self.y, self.X, self.yd, self.q, robust='hac', gwk=gwk)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_array_almost_equal(reg.betas, betas, 7)
vm = np.array([[ 225.0795089 , 17.11660041, -12.22448566],
[ 17.67097154, 2.47483461, -1.4183641 ],
[ -12.45093722, -1.40495464, 0.8700441 ]])
np.testing.assert_array_almost_equal(reg.vm, vm, 7)
self.assertEqual(reg.robust, 'hac')
def test_spatial(self):
w = pysal.queen_from_shapefile(pysal.examples.get_path('columbus.shp'))
reg = TSLS(self.y, self.X, self.yd, self.q, spat_diag=True, w=w)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_array_almost_equal(reg.betas, betas, 7)
vm = np.array([[ 229.05640809, 10.36945783, -9.54463414],
[ 10.36945783, 2.0013142 , -1.01826408],
[ -9.54463414, -1.01826408, 0.62914915]])
np.testing.assert_array_almost_equal(reg.vm, vm, 7)
ak_test = np.array([ 1.16816972, 0.27977763])
np.testing.assert_array_almost_equal(reg.ak_test, ak_test, 7)
def test_names(self):
w = pysal.queen_from_shapefile(pysal.examples.get_path('columbus.shp'))
gwk = pysal.kernelW_from_shapefile(pysal.examples.get_path('columbus.shp'),k=5,function='triangular', fixed=False)
name_x = ['inc']
name_y = 'crime'
name_yend = ['hoval']
name_q = ['discbd']
name_w = 'queen'
name_gwk = 'k=5'
name_ds = 'columbus'
reg = TSLS(self.y, self.X, self.yd, self.q,
spat_diag=True, w=w, robust='hac', gwk=gwk,
name_x=name_x, name_y=name_y, name_q=name_q, name_w=name_w,
name_yend=name_yend, name_gwk=name_gwk, name_ds=name_ds)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_array_almost_equal(reg.betas, betas, 7)
vm = np.array([[ 225.0795089 , 17.11660041, -12.22448566],
[ 17.67097154, 2.47483461, -1.4183641 ],
[ -12.45093722, -1.40495464, 0.8700441 ]])
np.testing.assert_array_almost_equal(reg.vm, vm, 7)
self.assertListEqual(reg.name_x, ['CONSTANT']+name_x)
self.assertListEqual(reg.name_yend, name_yend)
self.assertListEqual(reg.name_q, name_q)
self.assertEqual(reg.name_y, name_y)
self.assertEqual(reg.name_w, name_w)
self.assertEqual(reg.name_gwk, name_gwk)
self.assertEqual(reg.name_ds, name_ds)
if __name__ == '__main__':
unittest.main()
|
spreg-git/pysal
|
pysal/spreg/tests/test_twosls_sparse.py
|
Python
|
bsd-3-clause
| 14,544
|
[
"COLUMBUS"
] |
bf472607afd7bd65e0c0bc91c3f1d7c4f1cd7665c49c5de5c6088dfe7afb0991
|
#!/usr/bin/env python
"""Catalysis Micro-kinetic Analysis Package (CatMAP)"""
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from catmap import __version__ as version
maintainer = 'Andrew J. Medford'
maintainer_email = 'ajmedfor@slac.stanford.edu'
author = maintainer
author_email = maintainer_email
description = __doc__
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: X11 Applications :: GTK',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Operating System :: POSIX :: Windows',
'Programming Language :: Fortran',
'Programming Language :: Python',
'Topic :: Education',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Scientific/Engineering :: Visualization',
]
requires = ['ase',
'matplotlib',
'mpmath',
'numpy',
]
license = 'COPYING.txt'
long_description = file('README.md').read()
name='python-catmap'
packages = [
'catmap',
'catmap.analyze',
'catmap.data',
'catmap.mappers',
'catmap.parsers',
'catmap.scalers',
'catmap.solvers',
'catmap.thermodynamics',
]
package_dir = {'catmap':'catmap'}
package_data = {'catmap':[]}
platforms = ['linux', 'windows']
if os.name == 'nt':
scripts = []
else:
scripts = [
'tools/catmap'
]
url = 'https://github.com/ajmedford/catmap'
setup(
author=author,
author_email=author_email,
description=description,
license=license,
long_description=long_description,
maintainer=maintainer,
maintainer_email=maintainer_email,
name=name,
package_data=package_data,
package_dir=package_dir,
packages=packages,
platforms=platforms,
scripts=scripts,
url=url,
version=version,
)
|
chuanshi/catmap
|
setup.py
|
Python
|
gpl-3.0
| 2,231
|
[
"ASE"
] |
ef4ef5159c351add65821baa9adbb6837d203848da3f907ccbac5c136154b554
|
"""
atsp.py: solve the asymmetric traveling salesman problem
formulations implemented:
- mtz -- Miller-Tucker-Zemlin's potential formulation
- mtz_strong -- Miller-Tucker-Zemlin's potential formulation with stronger constraint
- scf -- single-commodity flow formulation
- mcf -- multi-commodity flow formulation
Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012
"""
from pyscipopt import Model, quicksum, multidict
def mtz(n,c):
"""mtz: Miller-Tucker-Zemlin's model for the (asymmetric) traveling salesman problem
(potential formulation)
Parameters:
- n: number of nodes
- c[i,j]: cost for traversing arc (i,j)
Returns a model, ready to be solved.
"""
model = Model("atsp - mtz")
x,u = {},{}
for i in range(1,n+1):
u[i] = model.addVar(lb=0, ub=n-1, vtype="C", name="u(%s)"%i)
for j in range(1,n+1):
if i != j:
x[i,j] = model.addVar(vtype="B", name="x(%s,%s)"%(i,j))
for i in range(1,n+1):
model.addCons(quicksum(x[i,j] for j in range(1,n+1) if j != i) == 1, "Out(%s)"%i)
model.addCons(quicksum(x[j,i] for j in range(1,n+1) if j != i) == 1, "In(%s)"%i)
for i in range(1,n+1):
for j in range(2,n+1):
if i != j:
model.addCons(u[i] - u[j] + (n-1)*x[i,j] <= n-2, "MTZ(%s,%s)"%(i,j))
model.setObjective(quicksum(c[i,j]*x[i,j] for (i,j) in x), "minimize")
model.data = x,u
return model
def mtz_strong(n,c):
"""mtz_strong: Miller-Tucker-Zemlin's model for the (asymmetric) traveling salesman problem
(potential formulation, adding stronger constraints)
Parameters:
n - number of nodes
c[i,j] - cost for traversing arc (i,j)
Returns a model, ready to be solved.
"""
model = Model("atsp - mtz-strong")
x,u = {},{}
for i in range(1,n+1):
u[i] = model.addVar(lb=0, ub=n-1, vtype="C", name="u(%s)"%i)
for j in range(1,n+1):
if i != j:
x[i,j] = model.addVar(vtype="B", name="x(%s,%s)"%(i,j))
for i in range(1,n+1):
model.addCons(quicksum(x[i,j] for j in range(1,n+1) if j != i) == 1, "Out(%s)"%i)
model.addCons(quicksum(x[j,i] for j in range(1,n+1) if j != i) == 1, "In(%s)"%i)
for i in range(1,n+1):
for j in range(2,n+1):
if i != j:
model.addCons(u[i] - u[j] + (n-1)*x[i,j] + (n-3)*x[j,i] <= n-2, "LiftedMTZ(%s,%s)"%(i,j))
for i in range(2,n+1):
model.addCons(-x[1,i] - u[i] + (n-3)*x[i,1] <= -2, name="LiftedLB(%s)"%i)
model.addCons(-x[i,1] + u[i] + (n-3)*x[1,i] <= n-2, name="LiftedUB(%s)"%i)
model.setObjective(quicksum(c[i,j]*x[i,j] for (i,j) in x), "minimize")
model.data = x,u
return model
def scf(n,c):
"""scf: single-commodity flow formulation for the (asymmetric) traveling salesman problem
Parameters:
- n: number of nodes
- c[i,j]: cost for traversing arc (i,j)
Returns a model, ready to be solved.
"""
model = Model("atsp - scf")
x,f = {},{}
for i in range(1,n+1):
for j in range(1,n+1):
if i != j:
x[i,j] = model.addVar(vtype="B", name="x(%s,%s)"%(i,j))
if i==1:
f[i,j] = model.addVar(lb=0, ub=n-1, vtype="C", name="f(%s,%s)"%(i,j))
else:
f[i,j] = model.addVar(lb=0, ub=n-2, vtype="C", name="f(%s,%s)"%(i,j))
for i in range(1,n+1):
model.addCons(quicksum(x[i,j] for j in range(1,n+1) if j != i) == 1, "Out(%s)"%i)
model.addCons(quicksum(x[j,i] for j in range(1,n+1) if j != i) == 1, "In(%s)"%i)
model.addCons(quicksum(f[1,j] for j in range(2,n+1)) == n-1, "FlowOut")
for i in range(2,n+1):
model.addCons(quicksum(f[j,i] for j in range(1,n+1) if j != i) - \
quicksum(f[i,j] for j in range(1,n+1) if j != i) == 1, "FlowCons(%s)"%i)
for j in range(2,n+1):
model.addCons(f[1,j] <= (n-1)*x[1,j], "FlowUB(%s,%s)"%(1,j))
for i in range(2,n+1):
if i != j:
model.addCons(f[i,j] <= (n-2)*x[i,j], "FlowUB(%s,%s)"%(i,j))
model.setObjective(quicksum(c[i,j]*x[i,j] for (i,j) in x), "minimize")
model.data = x,f
return model
def mcf(n,c):
"""mcf: multi-commodity flow formulation for the (asymmetric) traveling salesman problem
Parameters:
- n: number of nodes
- c[i,j]: cost for traversing arc (i,j)
Returns a model, ready to be solved.
"""
model = Model("mcf")
x,f = {},{}
for i in range(1,n+1):
for j in range(1,n+1):
if i != j:
x[i,j] = model.addVar(vtype="B", name="x(%s,%s)"%(i,j))
if i != j and j != 1:
for k in range(2,n+1):
if i != k:
f[i,j,k] = model.addVar(ub=1, vtype="C", name="f(%s,%s,%s)"%(i,j,k))
for i in range(1,n+1):
model.addCons(quicksum(x[i,j] for j in range(1,n+1) if j != i) == 1, "Out(%s)"%i)
model.addCons(quicksum(x[j,i] for j in range(1,n+1) if j != i) == 1, "In(%s)"%i)
for k in range(2,n+1):
model.addCons(quicksum(f[1,i,k] for i in range(2,n+1) if (1,i,k) in f) == 1, "FlowOut(%s)"%k)
model.addCons(quicksum(f[i,k,k] for i in range(1,n+1) if (i,k,k) in f) == 1, "FlowIn(%s)"%k)
for i in range(2,n+1):
if i != k:
model.addCons(quicksum(f[j,i,k] for j in range(1,n+1) if (j,i,k) in f) == \
quicksum(f[i,j,k] for j in range(1,n+1) if (i,j,k) in f),
"FlowCons(%s,%s)"%(i,k))
for (i,j,k) in f:
model.addCons(f[i,j,k] <= x[i,j], "FlowUB(%s,%s,%s)"%(i,j,k))
model.setObjective(quicksum(c[i,j]*x[i,j] for (i,j) in x), "minimize")
model.data = x,f
return model
def sequence(arcs):
"""sequence: make a list of cities to visit, from set of arcs"""
succ = {}
for (i,j) in arcs:
succ[i] = j
curr = 1 # first node being visited
sol = [curr]
    for i in range(len(arcs)-1):
curr = succ[curr]
sol.append(curr)
return sol
if __name__ == "__main__":
n = 5
c = { (1,1):0, (1,2):1989, (1,3):102, (1,4):102, (1,5):103,
(2,1):104, (2,2):0, (2,3):11, (2,4):104, (2,5):108,
(3,1):107, (3,2):108, (3,3):0, (3,4):19, (3,5):102,
(4,1):109, (4,2):102, (4,3):107, (4,4):0, (4,5):15,
(5,1):13, (5,2):103, (5,3):104, (5,4):101, (5,5):0,
}
model = mtz(n,c)
model.hideOutput() # silent mode
model.optimize()
cost = model.getObjVal()
print()
print("Miller-Tucker-Zemlin's model:")
print("Optimal value:", cost)
#model.printAttr("X")
for v in model.getVars():
if model.getVal(v) > 0.001:
print(v.name, "=", model.getVal(v))
x,u = model.data
sol = [i for (p,i) in sorted([(int(model.getVal(u[i])+.5),i) for i in range(1,n+1)])]
print(sol)
arcs = [(i,j) for (i,j) in x if model.getVal(x[i,j]) > .5]
sol = sequence(arcs)
print(sol)
# assert cost == 5
model = mtz_strong(n,c)
model.hideOutput() # silent mode
model.optimize()
cost = model.getObjVal()
print()
print("Miller-Tucker-Zemlin's model with stronger constraints:")
print("Optimal value:",cost)
#model.printAttr("X")
for v in model.getVars():
if model.getVal(v) > 0.001:
print(v.name, "=", model.getVal(v))
x,u = model.data
sol = [i for (p,i) in sorted([(int(model.getVal(u[i])+.5),i) for i in range(1,n+1)])]
print(sol)
arcs = [(i,j) for (i,j) in x if model.getVal(x[i,j]) > .5]
sol = sequence(arcs)
print(sol)
# assert cost == 5
model = scf(n,c)
model.hideOutput() # silent mode
model.optimize()
cost = model.getObjVal()
print()
print("Single-commodity flow formulation:")
print("Optimal value:",cost)
#model.printAttr("X")
for v in model.getVars():
if model.getVal(v) > 0.001:
print(v.name, "=", model.getVal(v))
x,f = model.data
arcs = [(i,j) for (i,j) in x if model.getVal(x[i,j]) > .5]
sol = sequence(arcs)
print(sol)
# assert cost == 5
model = mcf(n,c)
model.hideOutput() # silent mode
model.optimize()
cost = model.getObjVal()
print()
print("Multi-commodity flow formulation:")
print("Optimal value:",cost)
#model.printAttr("X")
for v in model.getVars():
if model.getVal(v)>0.001:
print(v.name, "=", model.getVal(v))
x,f = model.data
arcs = [(i,j) for (i,j) in x if model.getVal(x[i,j]) > .5]
sol = sequence(arcs)
print(sol)
# assert cost == 5
|
mattmilten/PySCIPOpt
|
examples/finished/atsp.py
|
Python
|
mit
| 8,748
|
[
"VisIt"
] |
8d0cc889cfdcaead83151a36a3c325ee8ae601e8a24cff751e20f206903a2918
|
# Copyright 2013 by Leighton Pritchard. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
This module provides code to work with data from the KEGG database.
References:
Kanehisa, M. and Goto, S.; KEGG: Kyoto Encyclopedia of Genes and Genomes.
Nucleic Acids Res. 28, 29-34 (2000).
URL: http://www.genome.ad.jp/kegg/
"""
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/KEGG/KGML/__init__.py
|
Python
|
gpl-2.0
| 468
|
[
"Biopython"
] |
681ddaeec7cad6b94204316a0ae6a01a763bb2887fc3d1ac45ec205a28843633
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
class VerseReferenceList(object):
"""
The VerseReferenceList class encapsulates a list of verse references, but
maintains the order in which they were added.
"""
def __init__(self):
self.verse_list = []
self.version_list = []
self.current_index = -1
def add(self, book, chapter, verse, version, copyright, permission):
self.add_version(version, copyright, permission)
if not self.verse_list or self.verse_list[self.current_index][u'book'] != book:
self.verse_list.append({u'version': version, u'book': book,
u'chapter': chapter, u'start': verse, u'end': verse})
self.current_index += 1
elif self.verse_list[self.current_index][u'chapter'] != chapter:
self.verse_list.append({u'version': version, u'book': book,
u'chapter': chapter, u'start': verse, u'end': verse})
self.current_index += 1
elif (self.verse_list[self.current_index][u'end'] + 1) == verse:
self.verse_list[self.current_index][u'end'] = verse
else:
self.verse_list.append({u'version': version, u'book': book,
u'chapter': chapter, u'start': verse, u'end': verse})
self.current_index += 1
def add_version(self, version, copyright, permission):
for bible_version in self.version_list:
if bible_version[u'version'] == version:
return
self.version_list.append({u'version': version, u'copyright': copyright, u'permission': permission})
def format_verses(self):
result = u''
for index, verse in enumerate(self.verse_list):
if index == 0:
result = u'%s %s:%s' % (verse[u'book'], verse[u'chapter'], verse[u'start'])
if verse[u'start'] != verse[u'end']:
result = u'%s-%s' % (result, verse[u'end'])
continue
prev = index - 1
if self.verse_list[prev][u'version'] != verse[u'version']:
result = u'%s (%s)' % (result, self.verse_list[prev][u'version'])
result += u', '
if self.verse_list[prev][u'book'] != verse[u'book']:
result = u'%s%s %s:' % (result, verse[u'book'], verse[u'chapter'])
elif self.verse_list[prev][u'chapter'] != verse[u'chapter']:
result = u'%s%s:' % (result, verse[u'chapter'])
result += str(verse[u'start'])
if verse[u'start'] != verse[u'end']:
result = u'%s-%s' % (result, verse[u'end'])
if len(self.version_list) > 1:
result = u'%s (%s)' % (result, verse[u'version'])
return result
def format_versions(self):
result = u''
for index, version in enumerate(self.version_list):
if index > 0:
if result[-1] not in [u';', u',', u'.']:
result += u';'
result += u' '
result = u'%s%s, %s' % (result, version[u'version'], version[u'copyright'])
if version[u'permission'].strip():
result = result + u', ' + version[u'permission']
result = result.rstrip()
if result.endswith(u','):
return result[:len(result)-1]
return result
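# Example usage (an illustrative sketch, not part of OpenLP; the verse data
# below is made up):
#
#     refs = VerseReferenceList()
#     refs.add(u'John', 3, 16, u'KJV', u'Public Domain', u'')
#     refs.add(u'John', 3, 17, u'KJV', u'Public Domain', u'')
#     refs.format_verses()    # u'John 3:16-17'
#     refs.format_versions()  # u'KJV, Public Domain'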
|
marmyshev/transitions
|
openlp/plugins/bibles/lib/versereferencelist.py
|
Python
|
gpl-2.0
| 5,402
|
[
"Brian"
] |
8eb6c7194d03198067b0d1eeaf0e591aa65547dcbf750f811dcb453fe85d7af2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
background:
- Pictures exported from iPhoto do not have their original date in the finder.
- This info is hidden in exif
- this has to do with the daily photo app on iOS
This script
- loops over images in a folder (exported from iPhoto)
- extracts date-time-stamp from exif information
- saves image with a date stamp (PIL)
possible Improvements:
- currently movie is made using command line `mencoder`. This could soon be accomplished using OpenCV
"""
import sys
import os
import datetime
import numpy as np
from PIL import Image
from PIL.ExifTags import TAGS
from PIL import ImageFont
from PIL import ImageDraw
__author__ = "Claus Haslauer (mail@planetwater.org)"
__version__ = "$Revision: 0.1 $"
__date__ = datetime.date(2015,2,21)
__copyright__ = "Copyright (c) 2015 Claus P. Haslauer"
__license__ = "Python"
def main():
# folder where images (exported from iPhoto) reside
top_path = r'XXX'
# folder where output images are to be saved
out_path = r'YYY'
# FONT info: path where your fonts reside
font_path = r'/YYY'
font_file = "AlteHaasGroteskRegular.ttf"
# start investigating into top_path
filenames = next(os.walk(top_path))[2]
n_fnames = len(filenames)
print "found %i pictures" % n_fnames
for cur_fname in filenames:#[:10]:
if cur_fname[0] != '.':
print cur_fname
cur_fobj = os.path.join(top_path,cur_fname)
# EXIF INFORMATION
cur_exif = get_exif(cur_fobj)
aufnahme_datum_str = cur_exif['DateTimeOriginal']
# 'DateTimeOriginal': '2014:08:27 14:01:39'
aufname_time_fmt = '%Y:%m:%d %H:%M:%S'
aufname_datum = datetime.datetime.strptime(aufnahme_datum_str, aufname_time_fmt)
# 'ExifImageHeight': 1280,
height = int(cur_exif['ExifImageHeight'])
width = int(cur_exif['ExifImageWidth'])
# SHOW IMAGE
im = Image.open(cur_fobj)
draw = ImageDraw.Draw(im)
font_fobj = os.path.join(font_path, font_file)
font = ImageFont.truetype(font_fobj, 44)
time_format = "%d-%b-%y"
date_to_print = aufname_datum.strftime(time_format)
draw.text((44, 0.95*height)
, date_to_print
,(255,255,255)
,font=font) #
#im.show()
out_fname = cur_fname[:-4] + '.png'
#print out_fname
out_fobj = os.path.join(out_path, out_fname)
im.save(out_fobj, 'PNG')
# mencoder "mf://*.png" -o movie_out.avi -ovc x264 [-ofps 0.1]
# from mayavi doc: http://docs.enthought.com/mayavi/mayavi/tips.html#making-movies-from-a-stack-of-images
# mencoder "mf://*.png" -mf fps=1 -o anim.avi -ovc lavc -lavcopts vcodec=msmpeg4v2:vbitrate=500
# possibly this could be done using openCV
# v 3.0 is currently in beta
# http://stackoverflow.com/questions/14440400/creating-a-video-using-opencv-2-4-0-in-python
#http://opencv.org
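    # A commented OpenCV sketch (untested here; it assumes the cv2 bindings
    # are installed and that every exported frame has the same size; the
    # FOURCC helper is cv2.cv.CV_FOURCC in OpenCV 2.x and
    # cv2.VideoWriter_fourcc in 3.x):
    #   import cv2
    #   fourcc = cv2.cv.CV_FOURCC(*'XVID')
    #   writer = cv2.VideoWriter('movie_out.avi', fourcc, 1.0, (width, height))
    #   for frame_name in sorted(os.listdir(out_path)):
    #       writer.write(cv2.imread(os.path.join(out_path, frame_name)))
    #   writer.release()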
print "Done! Yay!"
def get_exif(fn):
"""
this is what a typical exif dictionary looks like
{41990: 0,
'ColorSpace': 1,
'ComponentsConfiguration': '\x01\x02\x03\x00',
'DateTime': '2014:08:27 14:01:39',
'DateTimeDigitized': '2014:08:27 14:01:39',
'DateTimeOriginal': '2014:08:27 14:01:39',
'ExifImageHeight': 1280,
'ExifImageWidth': 960,
'ExifOffset': 140,
'ExifVersion': '0221',
'FlashPixVersion': '0100',
'Orientation': 1,
'ResolutionUnit': 2,
'Software': 'Dayli',
'XResolution': (72, 1),
'YResolution': (72, 1)}
"""
ret = {}
i = Image.open(fn)
info = i._getexif()
for tag, value in info.items():
decoded = TAGS.get(tag, tag)
ret[decoded] = value
return ret
if __name__ == '__main__':
main()
|
clausTue/stamp_daily_images
|
process_iPhone_exports.py
|
Python
|
gpl-2.0
| 4,124
|
[
"Mayavi"
] |
9603ad2d12801545e3c79bec0b1097122dfed20afc9f97b77585ddbb0de1f553
|
# -----------------------------------------------------------------------------
# Copyright (c) 2015 Ralph Hempel <rhempel@hempeldesigngroup.com>
# Copyright (c) 2015 Anton Vanhoucke <antonvh@gmail.com>
# Copyright (c) 2015 Denis Demidov <dennis.demidov@gmail.com>
# Copyright (c) 2015 Eric Pascual <eric@pobot.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------
import sys
import os
import stat
import time
import _thread
from collections import OrderedDict
from ev3dev2 import get_current_platform, Device
from ev3dev2.stopwatch import StopWatch
from time import sleep
if sys.version_info < (3, 4):
raise SystemError('Must be using Python 3.4 or higher')
# Import the LED settings, this is platform specific
platform = get_current_platform()
if platform == 'ev3':
from ev3dev2._platform.ev3 import LEDS, LED_GROUPS, LED_COLORS, LED_DEFAULT_COLOR
elif platform == 'evb':
from ev3dev2._platform.evb import LEDS, LED_GROUPS, LED_COLORS, LED_DEFAULT_COLOR
elif platform == 'pistorms':
from ev3dev2._platform.pistorms import LEDS, LED_GROUPS, LED_COLORS, LED_DEFAULT_COLOR
elif platform == 'brickpi':
from ev3dev2._platform.brickpi import LEDS, LED_GROUPS, LED_COLORS, LED_DEFAULT_COLOR
elif platform == 'brickpi3':
from ev3dev2._platform.brickpi3 import LEDS, LED_GROUPS, LED_COLORS, LED_DEFAULT_COLOR
elif platform == 'fake':
from ev3dev2._platform.fake import LEDS, LED_GROUPS, LED_COLORS, LED_DEFAULT_COLOR
else:
raise Exception("Unsupported platform '%s'" % platform)
class Led(Device):
"""
Any device controlled by the generic LED driver.
See https://www.kernel.org/doc/Documentation/leds/leds-class.txt
for more details.
"""
SYSTEM_CLASS_NAME = 'leds'
SYSTEM_DEVICE_NAME_CONVENTION = '*'
__slots__ = [
'_max_brightness',
'_brightness',
'_triggers',
'_trigger',
'_delay_on',
'_delay_off',
'desc',
]
def __init__(self, name_pattern=SYSTEM_DEVICE_NAME_CONVENTION, name_exact=False, desc=None, **kwargs):
self.desc = desc
super(Led, self).__init__(self.SYSTEM_CLASS_NAME, name_pattern, name_exact, **kwargs)
self._max_brightness = None
self._brightness = None
self._triggers = None
self._trigger = None
self._delay_on = None
self._delay_off = None
def __str__(self):
if self.desc:
return self.desc
else:
return Device.__str__(self)
@property
def max_brightness(self):
"""
Returns the maximum allowable brightness value.
"""
self._max_brightness, value = self.get_cached_attr_int(self._max_brightness, 'max_brightness')
return value
@property
def brightness(self):
"""
Sets the brightness level. Possible values are from 0 to ``max_brightness``.
"""
self._brightness, value = self.get_attr_int(self._brightness, 'brightness')
return value
@brightness.setter
def brightness(self, value):
self._brightness = self.set_attr_int(self._brightness, 'brightness', value)
@property
def triggers(self):
"""
Returns a list of available triggers.
"""
self._triggers, value = self.get_attr_set(self._triggers, 'trigger')
return value
@property
def trigger(self):
"""
Sets the LED trigger. A trigger is a kernel based source of LED events.
Triggers can either be simple or complex. A simple trigger isn't
configurable and is designed to slot into existing subsystems with
minimal additional code. Examples are the ``ide-disk`` and ``nand-disk``
triggers.
Complex triggers whilst available to all LEDs have LED specific
parameters and work on a per LED basis. The ``timer`` trigger is an example.
The ``timer`` trigger will periodically change the LED brightness between
0 and the current brightness setting. The ``on`` and ``off`` time can
be specified via ``delay_{on,off}`` attributes in milliseconds.
You can change the brightness value of a LED independently of the timer
trigger. However, if you set the brightness value to 0 it will
also disable the ``timer`` trigger.
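        Example (a minimal sketch; assumes a platform that defines the
        ``LEFT`` LED group, such as a standard EV3 brick)::

            my_leds = Leds()
            my_leds.set('LEFT', trigger='timer')
            my_leds.set('LEFT', delay_on=250, delay_off=750)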
"""
self._trigger, value = self.get_attr_from_set(self._trigger, 'trigger')
return value
@trigger.setter
def trigger(self, value):
self._trigger = self.set_attr_string(self._trigger, 'trigger', value)
# Workaround for ev3dev/ev3dev#225.
# When trigger is set to 'timer', we need to wait for 'delay_on' and
# 'delay_off' attributes to appear with correct permissions.
if value == 'timer':
for attr in ('delay_on', 'delay_off'):
path = self._path + '/' + attr
# Make sure the file has been created:
for _ in range(5):
if os.path.exists(path):
break
time.sleep(0.2)
else:
raise Exception('"{}" attribute has not been created'.format(attr))
# Make sure the file has correct permissions:
for _ in range(5):
mode = stat.S_IMODE(os.stat(path)[stat.ST_MODE])
if mode & stat.S_IRGRP and mode & stat.S_IWGRP:
break
time.sleep(0.2)
else:
raise Exception('"{}" attribute has wrong permissions'.format(attr))
@property
def delay_on(self):
"""
The ``timer`` trigger will periodically change the LED brightness between
0 and the current brightness setting. The ``on`` time can
be specified via ``delay_on`` attribute in milliseconds.
"""
# Workaround for ev3dev/ev3dev#225.
# 'delay_on' and 'delay_off' attributes are created when trigger is set
# to 'timer', and destroyed when it is set to anything else.
# This means the file cache may become outdated, and we may have to
# reopen the file.
for retry in (True, False):
try:
self._delay_on, value = self.get_attr_int(self._delay_on, 'delay_on')
return value
except OSError:
if retry:
self._delay_on = None
else:
raise
@delay_on.setter
def delay_on(self, value):
# Workaround for ev3dev/ev3dev#225.
# 'delay_on' and 'delay_off' attributes are created when trigger is set
# to 'timer', and destroyed when it is set to anything else.
# This means the file cache may become outdated, and we may have to
# reopen the file.
for retry in (True, False):
try:
self._delay_on = self.set_attr_int(self._delay_on, 'delay_on', value)
return
except OSError:
if retry:
self._delay_on = None
else:
raise
@property
def delay_off(self):
"""
The ``timer`` trigger will periodically change the LED brightness between
0 and the current brightness setting. The ``off`` time can
be specified via ``delay_off`` attribute in milliseconds.
"""
# Workaround for ev3dev/ev3dev#225.
# 'delay_on' and 'delay_off' attributes are created when trigger is set
# to 'timer', and destroyed when it is set to anything else.
# This means the file cache may become outdated, and we may have to
# reopen the file.
for retry in (True, False):
try:
self._delay_off, value = self.get_attr_int(self._delay_off, 'delay_off')
return value
except OSError:
if retry:
self._delay_off = None
else:
raise
@delay_off.setter
def delay_off(self, value):
"""
Workaround for ev3dev/ev3dev#225.
``delay_on`` and ``delay_off`` attributes are created when trigger is set
to ``timer``, and destroyed when it is set to anything else.
This means the file cache may become outdated, and we may have to
reopen the file.
"""
for retry in (True, False):
try:
self._delay_off = self.set_attr_int(self._delay_off, 'delay_off', value)
return
except OSError:
if retry:
self._delay_off = None
else:
raise
@property
def brightness_pct(self):
"""
Returns LED brightness as a fraction of max_brightness
"""
return float(self.brightness) / self.max_brightness
@brightness_pct.setter
def brightness_pct(self, value):
self.brightness = value * self.max_brightness
class Leds(object):
def __init__(self):
self.leds = OrderedDict()
self.led_groups = OrderedDict()
self.led_colors = LED_COLORS
self.animate_thread_id = None
self.animate_thread_stop = False
for (key, value) in LEDS.items():
self.leds[key] = Led(name_pattern=value, desc=key)
for (key, value) in LED_GROUPS.items():
self.led_groups[key] = []
for led_name in value:
self.led_groups[key].append(self.leds[led_name])
def __str__(self):
return self.__class__.__name__
def set_color(self, group, color, pct=1):
"""
Sets the brightness of the LEDs in the given group to the values specified
in the color tuple. When a percentage is specified via ``pct``, the
brightness of each LED is reduced proportionally.
Example::
my_leds = Leds()
my_leds.set_color('LEFT', 'AMBER')
With a custom color::
my_leds = Leds()
my_leds.set_color('LEFT', (0.5, 0.3))
"""
# If this is a platform without LEDs there is nothing to do
if not self.leds:
return
color_tuple = color
if isinstance(color, str):
assert color in self.led_colors, \
"%s is an invalid LED color, valid choices are %s" % \
(color, ', '.join(self.led_colors.keys()))
color_tuple = self.led_colors[color]
assert group in self.led_groups, \
"%s is an invalid LED group, valid choices are %s" % \
(group, ', '.join(self.led_groups.keys()))
for led, value in zip(self.led_groups[group], color_tuple):
led.brightness_pct = value * pct
def set(self, group, **kwargs):
"""
Set attributes for each LED in the group.
Example::
my_leds = Leds()
my_leds.set('LEFT', brightness_pct=0.5, trigger='timer')
"""
# If this is a platform without LEDs there is nothing to do
if not self.leds:
return
assert group in self.led_groups, \
"%s is an invalid LED group, valid choices are %s" % \
(group, ', '.join(self.led_groups.keys()))
for led in self.led_groups[group]:
for k in kwargs:
setattr(led, k, kwargs[k])
def all_off(self):
"""
Turn all LEDs off
"""
# If this is a platform without LEDs there is nothing to do
if not self.leds:
return
self.animate_stop()
for led in self.leds.values():
led.brightness = 0
def reset(self):
"""
Put all LEDs back to their default color
"""
if not self.leds:
return
self.animate_stop()
for group in self.led_groups:
self.set_color(group, LED_DEFAULT_COLOR)
def animate_stop(self):
"""
Signal the current animation thread to exit and wait for it to exit
"""
if self.animate_thread_id:
self.animate_thread_stop = True
while self.animate_thread_id:
pass
def animate_police_lights(self,
color1,
color2,
group1='LEFT',
group2='RIGHT',
sleeptime=0.5,
duration=5,
block=True):
"""
Cycle the ``group1`` and ``group2`` LEDs between ``color1`` and ``color2``
to give the effect of police lights. Alternate the ``group1`` and ``group2``
LEDs every ``sleeptime`` seconds.
Animate for ``duration`` seconds. If ``duration`` is None, animate forever.
Example:
.. code-block:: python
from ev3dev2.led import Leds
leds = Leds()
leds.animate_police_lights('RED', 'GREEN', sleeptime=0.75, duration=10)
"""
def _animate_police_lights():
self.all_off()
even = True
duration_ms = duration * 1000 if duration is not None else None
stopwatch = StopWatch()
stopwatch.start()
while True:
if even:
self.set_color(group1, color1)
self.set_color(group2, color2)
else:
self.set_color(group1, color2)
self.set_color(group2, color1)
if self.animate_thread_stop or stopwatch.is_elapsed_ms(duration_ms):
break
even = not even
sleep(sleeptime)
self.animate_thread_stop = False
self.animate_thread_id = None
self.animate_stop()
if block:
_animate_police_lights()
else:
self.animate_thread_id = _thread.start_new_thread(_animate_police_lights, ())
def animate_flash(self, color, groups=('LEFT', 'RIGHT'), sleeptime=0.5, duration=5, block=True):
"""
Turn all LEDs in ``groups`` off/on to ``color`` every ``sleeptime`` seconds
Animate for ``duration`` seconds. If ``duration`` is None, animate forever.
Example:
.. code-block:: python
from ev3dev2.led import Leds
leds = Leds()
leds.animate_flash('AMBER', sleeptime=0.75, duration=10)
"""
def _animate_flash():
even = True
duration_ms = duration * 1000 if duration is not None else None
stopwatch = StopWatch()
stopwatch.start()
while True:
if even:
for group in groups:
self.set_color(group, color)
else:
self.all_off()
if self.animate_thread_stop or stopwatch.is_elapsed_ms(duration_ms):
break
even = not even
sleep(sleeptime)
self.animate_thread_stop = False
self.animate_thread_id = None
self.animate_stop()
if block:
_animate_flash()
else:
self.animate_thread_id = _thread.start_new_thread(_animate_flash, ())
def animate_cycle(self, colors, groups=('LEFT', 'RIGHT'), sleeptime=0.5, duration=5, block=True):
"""
Cycle ``groups`` LEDs through ``colors``. Do this in a loop where
we display each color for ``sleeptime`` seconds.
Animate for ``duration`` seconds. If ``duration`` is None, animate forever.
Example:
.. code-block:: python
from ev3dev2.led import Leds
leds = Leds()
leds.animate_cycle(('RED', 'GREEN', 'AMBER'))
"""
def _animate_cycle():
index = 0
max_index = len(colors)
duration_ms = duration * 1000 if duration is not None else None
stopwatch = StopWatch()
stopwatch.start()
while True:
for group in groups:
self.set_color(group, colors[index])
index += 1
if index == max_index:
index = 0
if self.animate_thread_stop or stopwatch.is_elapsed_ms(duration_ms):
break
sleep(sleeptime)
self.animate_thread_stop = False
self.animate_thread_id = None
self.animate_stop()
if block:
_animate_cycle()
else:
self.animate_thread_id = _thread.start_new_thread(_animate_cycle, ())
def animate_rainbow(self, group1='LEFT', group2='RIGHT', increment_by=0.1, sleeptime=0.1, duration=5, block=True):
"""
Gradually fade from one color to the next
Animate for ``duration`` seconds. If ``duration`` is None, animate forever.
Example:
.. code-block:: python
from ev3dev2.led import Leds
leds = Leds()
leds.animate_rainbow()
"""
def _animate_rainbow():
# state 0: (LEFT,RIGHT) from (0,0) to (1,0)...RED
# state 1: (LEFT,RIGHT) from (1,0) to (1,1)...AMBER
# state 2: (LEFT,RIGHT) from (1,1) to (0,1)...GREEN
# state 3: (LEFT,RIGHT) from (0,1) to (0,0)...OFF
state = 0
left_value = 0
right_value = 0
MIN_VALUE = 0
MAX_VALUE = 1
self.all_off()
duration_ms = duration * 1000 if duration is not None else None
stopwatch = StopWatch()
stopwatch.start()
while True:
if state == 0:
left_value += increment_by
elif state == 1:
right_value += increment_by
elif state == 2:
left_value -= increment_by
elif state == 3:
right_value -= increment_by
else:
raise Exception("Invalid state {}".format(state))
# Keep left_value and right_value within the MIN/MAX values
left_value = min(left_value, MAX_VALUE)
right_value = min(right_value, MAX_VALUE)
left_value = max(left_value, MIN_VALUE)
right_value = max(right_value, MIN_VALUE)
self.set_color(group1, (left_value, right_value))
self.set_color(group2, (left_value, right_value))
if state == 0 and left_value == MAX_VALUE:
state = 1
elif state == 1 and right_value == MAX_VALUE:
state = 2
elif state == 2 and left_value == MIN_VALUE:
state = 3
elif state == 3 and right_value == MIN_VALUE:
state = 0
if self.animate_thread_stop or stopwatch.is_elapsed_ms(duration_ms):
break
sleep(sleeptime)
self.animate_thread_stop = False
self.animate_thread_id = None
self.animate_stop()
if block:
_animate_rainbow()
else:
self.animate_thread_id = _thread.start_new_thread(_animate_rainbow, ())
|
dwalton76/ev3dev-lang-python
|
ev3dev2/led.py
|
Python
|
mit
| 20,477
|
[
"Amber"
] |
e80ffc1dc9f956c6d923ef7d234d14c8831ad4187398ced87b159f0da128ba34
|
"""
Fit redshifts and classifications on DESI bricks
"""
import sys
import os
import numpy as np
import multiprocessing
import traceback
from desispec import io
from desispec.interpolation import resample_flux
from desispec.log import get_logger, WARNING
from desispec.zfind.redmonster import RedMonsterZfind
from desispec.zfind import ZfindBase
from desispec.io.qa import load_qa_brick, write_qa_brick
from desispec.util import default_nproc, dist_uniform
import argparse
def parse(options=None):
parser = argparse.ArgumentParser(description="Fit redshifts and classifications on bricks.")
parser.add_argument("-b", "--brick", type=str, required=False,
help="input brickname")
parser.add_argument("-n", "--nspec", type=int, required=False,
help="number of spectra to fit [default: all]")
parser.add_argument("--first-spec", type=int, required=False,default=0,
help="first spectrum to fit in file")
parser.add_argument("-o", "--outfile", type=str, required=False,
help="output file name")
parser.add_argument("--specprod_dir", type=str, required=False, default=None,
help="override $DESI_SPECTRO_REDUX/$SPECPROD environment variable path")
parser.add_argument("--objtype", type=str, required=False,
help="only use templates for these objtypes (comma separated elg,lrg,qso,star)")
parser.add_argument('--zrange-galaxy', type=float, default=(0.0, 1.6), nargs=2,
help='minimum and maximum galaxy redshifts to consider')
parser.add_argument('--zrange-qso', type=float, default=(0.0, 3.5), nargs=2,
help='minimum and maximum QSO redshifts to consider')
parser.add_argument('--zrange-star', type=float, default=(-0.005, 0.005), nargs=2,
help='minimum and maximum stellar redshifts to consider')
parser.add_argument("--zspec", action="store_true",
help="also include spectra in output file")
parser.add_argument('--qafile', type=str,
help='path of QA file.')
parser.add_argument('--qafig', type=str,
help='path of QA figure file')
parser.add_argument("--nproc", type=int, default=default_nproc,
help="number of parallel processes for multiprocessing")
parser.add_argument("--npoly", type=int, default=2,
help="number of parameters for additive polynomial")
parser.add_argument("brickfiles", nargs="*")
parser.add_argument("--print-info",type=str,help="print an info table on each spectrum and exit")
args = None
if options is None:
args = parser.parse_args()
else:
args = parser.parse_args(options)
return args
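# Illustrative usage sketch (the brick name and file names are hypothetical,
# shown only to make the options above concrete; not part of the original script):
#
#     args = parse(['--brick', '3587m005', '--nproc', '4',
#                   '--outfile', 'zbest-3587m005.fits'])
#     main(args)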
def main(args, comm=None) :
log = get_logger()
if args.npoly < 0 :
log.warning("Need npoly>=0, changing this %d -> 1"%args.npoly)
args.npoly=0
if args.nproc < 1 :
log.warning("Need nproc>=1, changing this %d -> 1"%args.nproc)
args.nproc=1
if comm is not None:
if args.nproc != 1:
if comm.rank == 0:
log.warning("Using MPI, forcing multiprocessing nproc -> 1")
args.nproc = 1
if args.objtype is not None:
args.objtype = args.objtype.split(',')
#- Read brick files for each channel
if (comm is None) or (comm.rank == 0):
log.info("Reading bricks")
brick = dict()
if args.brick is not None:
if len(args.brickfiles) != 0:
raise RuntimeError('Give -b/--brick or input brickfiles but not both')
for channel in ('b', 'r', 'z'):
filename = None
if (comm is None) or (comm.rank == 0):
filename = io.findfile('brick', band=channel, brickname=args.brick,
specprod_dir=args.specprod_dir)
if comm is not None:
filename = comm.bcast(filename, root=0)
brick[channel] = io.Brick(filename)
else:
for filename in args.brickfiles:
bx = io.Brick(filename)
if bx.channel not in brick:
brick[bx.channel] = bx
else:
if (comm is None) or (comm.rank == 0):
log.error('Channel {} in multiple input files'.format(bx.channel))
sys.exit(2)
filters=brick.keys()
for fil in filters:
if (comm is None) or (comm.rank == 0):
log.info("Filter found: "+fil)
#- Assume all channels have the same number of targets
#- TODO: generalize this to allow missing channels
#if args.nspec is None:
# args.nspec = brick['b'].get_num_targets()
# log.info("Fitting {} targets".format(args.nspec))
#else:
# log.info("Fitting {} of {} targets".format(args.nspec, brick['b'].get_num_targets()))
#- Coadd individual exposures and combine channels
#- Full coadd code is a bit slow, so try something quick and dirty for
#- now to get something going for redshifting
if (comm is None) or (comm.rank == 0):
log.info("Combining individual channels and exposures")
wave=[]
for fil in filters:
wave=np.concatenate([wave,brick[fil].get_wavelength_grid()])
np.ndarray.sort(wave)
nwave = len(wave)
#- flux and ivar arrays to fill for all targets
#flux = np.zeros((nspec, nwave))
#ivar = np.zeros((nspec, nwave))
flux = []
ivar = []
good_targetids=[]
targetids = brick['b'].get_target_ids()
fpinfo = None
if args.print_info is not None:
if (comm is None) or (comm.rank == 0):
fpinfo = open(args.print_info,"w")
for i, targetid in enumerate(targetids):
#- wave, flux, and ivar for this target; concatenate
xwave = list()
xflux = list()
xivar = list()
good=True
for channel in filters:
exp_flux, exp_ivar, resolution, info = brick[channel].get_target(targetid)
weights = np.sum(exp_ivar, axis=0)
ii, = np.where(weights > 0)
if len(ii)==0:
good=False
break
xwave.extend(brick[channel].get_wavelength_grid()[ii])
#- Average multiple exposures on the same wavelength grid for each channel
xflux.extend(np.average(exp_flux[:,ii], weights=exp_ivar[:,ii], axis=0))
xivar.extend(weights[ii])
if not good:
continue
xwave = np.array(xwave)
xivar = np.array(xivar)
xflux = np.array(xflux)
ii = np.argsort(xwave)
#flux[i], ivar[i] = resample_flux(wave, xwave[ii], xflux[ii], xivar[ii])
fl, iv = resample_flux(wave, xwave[ii], xflux[ii], xivar[ii])
flux.append(fl)
ivar.append(iv)
good_targetids.append(targetid)
if args.print_info is not None:
s2n = np.median(fl[:-1]*np.sqrt(iv[:-1])/np.sqrt(wave[1:]-wave[:-1]))
if (comm is None) or (comm.rank == 0):
print targetid,s2n
fpinfo.write(str(targetid)+" "+str(s2n)+"\n")
if args.print_info is not None:
if (comm is None) or (comm.rank == 0):
fpinfo.close()
sys.exit()
good_targetids=good_targetids[args.first_spec:]
flux=np.array(flux[args.first_spec:])
ivar=np.array(ivar[args.first_spec:])
nspec=len(good_targetids)
if (comm is None) or (comm.rank == 0):
log.info("number of good targets = %d"%nspec)
if (args.nspec is not None) and (args.nspec < nspec):
if (comm is None) or (comm.rank == 0):
log.info("Fitting {} of {} targets".format(args.nspec, nspec))
nspec=args.nspec
good_targetids=good_targetids[:nspec]
flux=flux[:nspec]
ivar=ivar[:nspec]
else :
if (comm is None) or (comm.rank == 0):
log.info("Fitting {} targets".format(nspec))
if (comm is None) or (comm.rank == 0):
log.debug("flux.shape={}".format(flux.shape))
zf = None
if comm is None:
# Use multiprocessing built in to RedMonster.
zf = RedMonsterZfind(wave= wave,flux= flux,ivar=ivar,
objtype=args.objtype,zrange_galaxy= args.zrange_galaxy,
zrange_qso=args.zrange_qso,zrange_star=args.zrange_star,
nproc=args.nproc,npoly=args.npoly)
else:
# Use MPI
# distribute the spectra among processes
my_firstspec, my_nspec = dist_uniform(nspec, comm.size, comm.rank)
my_specs = slice(my_firstspec, my_firstspec + my_nspec)
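# Worked example (an assumption about dist_uniform's contract, inferred from
# its use here): for nspec=10 and comm.size=4, dist_uniform(10, 4, rank) would
# return (first, count) pairs such as rank 0 -> (0, 3), rank 1 -> (3, 3),
# rank 2 -> (6, 2), rank 3 -> (8, 2), so each rank works on a contiguous,
# nearly equal slice of the spectra.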
for p in range(comm.size):
if p == comm.rank:
if my_nspec > 0:
log.info("process {} fitting spectra {} - {}".format(p, my_firstspec, my_firstspec+my_nspec-1))
else:
log.info("process {} idle".format(p))
sys.stdout.flush()
comm.barrier()
# do redshift fitting on each process
myzf = None
if my_nspec > 0:
savelevel = os.environ["DESI_LOGLEVEL"]
os.environ["DESI_LOGLEVEL"] = "WARNING"
myzf = RedMonsterZfind(wave=wave, flux=flux[my_specs,:], ivar=ivar[my_specs,:],
objtype=args.objtype,zrange_galaxy= args.zrange_galaxy,
zrange_qso=args.zrange_qso,zrange_star=args.zrange_star,
nproc=args.nproc,npoly=args.npoly)
os.environ["DESI_LOGLEVEL"] = savelevel
# Combine results into a single ZFindBase object on the root process.
# We could do this with a gather, but we are using a small number of
# processes, and point-to-point communication is easier for people to
# understand.
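# For comparison, a gather-based sketch (an alternative, not what this code
# does; assumes mpi4py-style object gathering with pickling):
#
#     all_results = comm.gather(myzf, root=0)  # list with one entry per rank
#     # rank 0 would then copy each entry into the matching slice of zf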
if comm.rank == 0:
zf = ZfindBase(myzf.wave, np.zeros((nspec, myzf.nwave)), np.zeros((nspec, myzf.nwave)), R=None, results=None)
for p in range(comm.size):
if comm.rank == 0:
if p == 0:
# root process copies its own data into output
zf.flux[my_specs] = myzf.flux
zf.ivar[my_specs] = myzf.ivar
zf.model[my_specs] = myzf.model
zf.z[my_specs] = myzf.z
zf.zerr[my_specs] = myzf.zerr
zf.zwarn[my_specs] = myzf.zwarn
zf.spectype[my_specs] = myzf.spectype
zf.subtype[my_specs] = myzf.subtype
else:
# root process receives from process p and copies
# it into the output.
p_nspec = comm.recv(source=p, tag=0)
# only proceed if the sending process actually
# has some spectra assigned to it.
if p_nspec > 0:
p_firstspec = comm.recv(source=p, tag=1)
p_slice = slice(p_firstspec, p_firstspec+p_nspec)
p_flux = comm.recv(source=p, tag=2)
zf.flux[p_slice] = p_flux
p_ivar = comm.recv(source=p, tag=3)
zf.ivar[p_slice] = p_ivar
p_model = comm.recv(source=p, tag=4)
zf.model[p_slice] = p_model
p_z = comm.recv(source=p, tag=5)
zf.z[p_slice] = p_z
p_zerr = comm.recv(source=p, tag=6)
zf.zerr[p_slice] = p_zerr
p_zwarn = comm.recv(source=p, tag=7)
zf.zwarn[p_slice] = p_zwarn
p_type = comm.recv(source=p, tag=8)
zf.spectype[p_slice] = p_type
p_subtype = comm.recv(source=p, tag=9)
zf.subtype[p_slice] = p_subtype
else:
if p == comm.rank:
# process p sends to root
comm.send(my_nspec, dest=0, tag=0)
if my_nspec > 0:
comm.send(my_firstspec, dest=0, tag=1)
comm.send(myzf.flux, dest=0, tag=2)
comm.send(myzf.ivar, dest=0, tag=3)
comm.send(myzf.model, dest=0, tag=4)
comm.send(myzf.z, dest=0, tag=5)
comm.send(myzf.zerr, dest=0, tag=6)
comm.send(myzf.zwarn, dest=0, tag=7)
comm.send(myzf.spectype, dest=0, tag=8)
comm.send(myzf.subtype, dest=0, tag=9)
comm.barrier()
if (comm is None) or (comm.rank == 0):
# The full results exist only on the rank zero process.
# reformat results
dtype = list()
dtype = [
('Z', zf.z.dtype),
('ZERR', zf.zerr.dtype),
('ZWARN', zf.zwarn.dtype),
('SPECTYPE', zf.spectype.dtype),
('SUBTYPE', zf.subtype.dtype),
]
formatted_data = np.empty(nspec, dtype=dtype)
formatted_data['Z'] = zf.z
formatted_data['ZERR'] = zf.zerr
formatted_data['ZWARN'] = zf.zwarn
formatted_data['SPECTYPE'] = zf.spectype
formatted_data['SUBTYPE'] = zf.subtype
# Create a ZfindBase object with formatted results
zfi = ZfindBase(None, None, None, results=formatted_data)
zfi.nspec = nspec
# QA
if (args.qafile is not None) or (args.qafig is not None):
log.info("performing skysub QA")
# Load
qabrick = load_qa_brick(args.qafile)
# Run
qabrick.run_qa('ZBEST', (zfi,brick))
# Write
if args.qafile is not None:
write_qa_brick(args.qafile, qabrick)
log.info("successfully wrote {:s}".format(args.qafile))
# Figure(s)
if args.qafig is not None:
raise IOError("Not yet implemented")
qa_plots.brick_zbest(args.qafig, zfi, qabrick)
#- Write some output
if args.outfile is None:
args.outfile = io.findfile('zbest', brickname=args.brick)
log.info("Writing "+args.outfile)
#io.write_zbest(args.outfile, args.brick, targetids, zfi, zspec=args.zspec)
io.write_zbest(args.outfile, args.brick, good_targetids, zfi, zspec=args.zspec)
return
|
timahutchinson/desispec
|
py/desispec/scripts/zfind.py
|
Python
|
bsd-3-clause
| 14,535
|
[
"Galaxy"
] |
6f031aa1b389662e731fcac7f6e1e3f9d23342447f264578f578f21e061fbfbc
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""This module defines an interface to CASTEP for
use by the ASE (Webpage: http://wiki.fysik.dtu.dk/ase)
Authors:
Max Hoffmann, max.hoffmann@ch.tum.de
Jörg Meyer, joerg.meyer@ch.tum.de
"""
__all__ = [
'Castep',
'CastepCell',
'CastepParam',
'create_castep_keywords']
contact_email = 'max.hoffmann@ch.tum.de'
from copy import deepcopy
import difflib
import numpy as np
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import ase
from ase.calculators.general import Calculator
from ase.constraints import FixCartesian
from ase.parallel import paropen
# Adapt import path to give local versions of castep_keywords
# a higher priority, assuming that personal folder will be
# standardized at ~/.ase, watch [ase-developers]
sys.path = ['',
os.path.expanduser('~/.ase'),
os.path.join(ase.__path__[0], 'calculators')] + sys.path
class Castep(Calculator):
r"""
CASTEP Interface Documentation
Introduction
============
CASTEP_ [1]_ is a software package which uses density functional theory to
provide a good atomic-level description of all manner of materials and
molecules. CASTEP can give information about total energies, forces and
stresses on an atomic system, as well as calculating optimum geometries, band
structures, optical spectra, phonon spectra and much more. It can also perform
molecular dynamics simulations.
The CASTEP calculator interface class offers intuitive access to all CASTEP
settings and most results. All CASTEP specific settings are accessible via
attribute access (*i.e*. ``calc.param.keyword = ...`` or
``calc.cell.keyword = ...``)
Getting Started:
================
Set the environment variables appropriately for your system.
>>> export CASTEP_COMMAND=' ... '
>>> export CASTEP_PP_PATH=' ... '
Running the Calculator
======================
The default initialization command for the CASTEP calculator is
.. class:: Castep(directory='CASTEP', label='castep')
To do a minimal run one only needs to set atoms, this will use all
default settings of CASTEP, meaning LDA, singlepoint, etc.
With a generated castep_keywords.py in place all options are accessible
by inspection, *i.e.* tab-completion. This works best when using `ipython`.
All options can be accessed via ``calc.param.<TAB>`` or ``calc.cell.<TAB>``
and documentation is printed with ``calc.param.<keyword> ?`` or
``calc.cell.<keyword> ?``. All options can also be set directly
using ``calc.keyword = ...`` or ``calc.KEYWORD = ...`` or even
``calc.KeYwOrD`` or directly as named arguments in the call to the constructor
(*e.g.* ``Castep(task='GeometryOptimization')``).
All options that go into the ``.param`` file are held in an ``CastepParam``
instance, while all options that go into the ``.cell`` file and don't belong
to the atoms object are held in an ``CastepCell`` instance. Each instance can
be created individually and can be added to calculators by attribute
assignment, *i.e.* ``calc.param = param`` or ``calc.cell = cell``.
All internal variables of the calculator start with an underscore (_).
All cell attributes that clearly belong into the atoms object are blocked.
Setting ``calc.atoms_attribute`` (*e.g.* ``= positions``) is sent directly to
the atoms object.
Arguments:
==========
========================= ====================================================
Keyword Description
========================= ====================================================
``directory`` The relative path where all input and output files
will be placed. If this does not exist, it will be
created. Existing directories will be moved to
directory-TIMESTAMP unless self._rename_existing_dir
is set to false.
``label`` The prefix of .param, .cell, .castep, etc. files.
========================= ====================================================
Additional Settings
===================
========================= ====================================================
Internal Setting Description
========================= ====================================================
``_castep_command`` (``=castep``): the actual shell command used to
call CASTEP.
``_check_checkfile`` (``=True``): this makes write_param() only
write a continue or reuse statement if the
addressed .check or .castep_bin file exists in the
directory.
``_copy_pspots`` (``=False``): if set to True the calculator will
actually copy the needed pseudo-potential (\*.usp)
file, usually it will only create symlinks.
``_export_settings`` (``=True``): if this is set to
True, all calculator internal settings shown here
will be included in the .param in a comment line (#)
and can be read again by merge_param. merge_param
can be forced to ignore this directive using the
optional argument ``ignore_internal_keys=True``.
``_force_write`` (``=True``): this controls whether the \*cell and
\*param will be overwritten.
``_prepare_input_only`` (``=False``): If set to True, the calculator will
create the \*cell and \*param files but not
start the calculation itself.
If this is used to prepare jobs locally
and run on a remote cluster it is recommended
to set ``_copy_pspots = True``.
``_castep_pp_path`` (``='.'``) : the place where the calculator
will look for pseudo-potential files.
``_rename_existing_dir`` (``=True``) : when using a new instance
of the calculator, this will move directories out of
the way that would be overwritten otherwise,
appending a date string.
``_set_atoms`` (``=False``) : setting this to True will overwrite
any atoms object previously attached to the
calculator when reading a \.castep file. By de-
fault, the read() function will only create a new
atoms object if none has been attached and other-
wise try to assign forces etc. based on the atom's
positions. ``_set_atoms=True`` could be necessary
if one uses CASTEP's internal geometry optimization
(``calc.param.task='GeometryOptimization'``)
because then the positions get out of sync.
*Warning*: this option is generally not recommended
unless one knows one really needs it. There should
never be any need, if CASTEP is used as a
single-point calculator.
``_track_output`` (``=False``) : if set to true, the interface
will append a number to the label on all input
and output files, where n is the number of calls
to this instance. *Warning*: this setting may con-
sume a lot more disk space because of the additio-
nal \*check files.
``_try_reuse`` (``=_track_output``) : when setting this, the in-
terface will try to fetch the reuse file from the
previous run even if _track_output is True. By de-
fault it is equal to _track_output, but may be
overridden, since this behavior may not always be
desirable for single-point calculations. Regular
reuse for *e.g.*
a geometry-optimization can be achieved by setting
``calc.param.reuse = True``.
========================= ====================================================
Special features:
=================
``.dryrun_ok()``
Runs ``castep_command seed -dryrun`` in a temporary directory and returns True if
all variables initialized ok. This is a fast way to catch errors in the
input. Afterwards _kpoints_used is set.
``.merge_param()``
Takes a filename or filehandler of a .param file or CastepParam instance and
merges it into the current calculator instance, overwriting current settings
``.keyword.clear()``
Can be used on any option like ``calc.param.keyword.clear()`` or
``calc.cell.keyword.clear()`` to return to the CASTEP default.
``.initialize()``
Creates all needed input in the ``_directory``. This can then copied to and
run in a place without ASE or even python.
``.set_pspot('<library>')``
This automatically sets the pseudo-potential for all present species to
*<Species>_<library>.usp*. Make sure that ``_castep_pp_path`` is set
correctly.
``print(calc)``
Prints a short summary of the calculator settings and atoms.
``ase.io.castep.read_seed('path-to/seed')``
Given you have a combination of seed.{param,cell,castep} this will return an
atoms object with the last ionic positions in the .castep file and all other
settings parsed from the .cell and .param file. If no .castep file is found
the positions are taken from the .cell file. The output directory will be
set to the same directory, only the label is preceded by 'copy_of\_' to
avoid overwriting.
Notes/Issues:
==============
* Currently *only* the FixAtoms *constraint* is fully supported for
reading and writing.
* There is no support for the CASTEP *unit system*. Units of eV and Angstrom
are used throughout. In particular when converting total energies from
different calculators, one should check that the same CODATA_ version is
used for constants and conversion factors, respectively.
.. _CASTEP: http://www.castep.org/
.. _CODATA: http://physics.nist.gov/cuu/Constants/index.html
.. [1] S. J. Clark, M. D. Segall, C. J. Pickard, P. J. Hasnip, M. J. Probert,
K. Refson, M. C. Payne Zeitschrift für Kristallographie 220(5-6)
pp.567- 570 (2005)
`online <http://goo.gl/tRJ7x>`_
End CASTEP Interface Documentation
"""
# Class attributes !
# keys set through atoms object
atoms_keys = [
'charge',
'ionic_constraints',
'lattice_abs',
'lattice_cart',
'positions_abs',
'positions_abs_final',
'positions_abs_intermediate',
'positions_frac',
'positions_frac_final',
'positions_frac_intermediate',
]
atoms_obj_keys = [
'dipole',
'energy_free',
'energy_zero',
'fermi',
'forces',
'nbands',
'positions',
'stress',
]
internal_keys = [
'_castep_command',
'_check_checkfile',
'_copy_pspots',
'_directory',
'_export_settings',
'_force_write',
'_label',
'_prepare_input_only',
'_castep_pp_path',
'_rename_existing_dir',
'_set_atoms',
'_track_output',
'_try_reuse',
]
def __init__(self, directory='CASTEP', label='castep',
castep_command=None, check_castep_version=False,
castep_pp_path=None,
**kwargs):
self.__name__ = 'Castep'
# initialize the ase.calculators.general calculator
Calculator.__init__(self)
from ase.io.castep import write_cell
self._write_cell = write_cell
castep_keywords = import_castep_keywords()
self.param = CastepParam()
self.cell = CastepCell()
###################################
# Calculator state variables #
###################################
self._calls = 0
self._castep_version = castep_keywords.castep_version
# collects warning from .castep files
self._warnings = []
# collects content from *.err file
self._error = None
# warnings raised by the ASE interface
self._interface_warnings = []
# store to check if recalculation is necessary
self._old_atoms = None
self._old_cell = None
self._old_param = None
###################################
# Internal keys #
# Allow to tweak the behavior #
###################################
self._opt = {}
self._castep_command = get_castep_command(castep_command)
self._castep_pp_path = get_castep_pp_path(castep_pp_path)
self._check_checkfile = True
self._copy_pspots = False
self._directory = os.path.abspath(directory)
self._export_settings = True
self._force_write = True
self._label = label
self._prepare_input_only = False
self._rename_existing_dir = True
self._set_atoms = False
self._track_output = False
self._try_reuse = False
# will be set on during runtime
self._seed = None
###################################
# (Physical) result variables #
###################################
self.atoms = None
# initialize result variables
self._forces = None
self._energy_total = None
self._energy_free = None
self._energy_0K = None
self._number_of_cell_constraints = None
self._output_verbosity = None
self._stress = None
self._unit_cell = None
self._kpoints = None
# pointers to other files used at runtime
self._check_file = None
self._castep_bin_file = None
# check version of CASTEP options module against current one
if check_castep_version:
local_castep_version = get_castep_version(self._castep_command)
if not hasattr(self, '_castep_version'):
print("No castep version found")
return
if not local_castep_version == self._castep_version:
print(('The options module was generated from version %s\n'
+ 'while you are currently using CASTEP version %s')
% (self._castep_version,
get_castep_version(self._castep_command)))
self._castep_version = local_castep_version
# processes optional arguments in kw style
for keyword, value in kwargs.iteritems():
self.__setattr__(keyword, value)
def _castep_find_last_record(self, castep_file):
"""Checks wether a given castep file has a regular
ending message following the last banner message. If this
is the case, the line number of the last banner is message
is return, otherwise False.
returns (record_start, record_end, end_found, last_record_complete)
"""
if type(castep_file) is str:
castep_file = paropen(castep_file, 'r')
file_opened = True
else:
file_opened = False
record_starts = []
while True:
line = castep_file.readline()
if 'Welcome' in line and 'CASTEP' in line:
record_starts = [castep_file.tell()] + record_starts
if not line:
break
if record_starts == []:
print("Could not find CASTEP label in result file: %s"
% castep_file)
print("Are you sure this is a .castep file?")
return
# search for regular end of file
end_found = False
# start to search from record beginning from the back
# and see if
record_end = -1
for record_nr, record_start in enumerate(record_starts):
castep_file.seek(record_start)
while True:
line = castep_file.readline()
if not line:
break
if 'warn' in line.lower():
self._warnings.append(line)
if 'Writing analysis data to' in line:
#if 'Writing model to' in line:
end_found = True
record_end = castep_file.tell()
break
if end_found:
break
if file_opened:
castep_file.close()
if end_found:
# record_nr == 0 corresponds to the last record here
if record_nr == 0:
return (record_start, record_end, True, True)
else:
return (record_start, record_end, True, False)
else:
return (0, record_end, False, False)
def read(self, castep_file=None):
"""Read a castep file into the current instance."""
if castep_file is None:
if self._castep_file:
castep_file = self._castep_file
else:
print('No CASTEP file specified')
return
if not os.path.exists(castep_file):
print('No CASTEP file found')
if self._seed is None:
self._seed = os.path.splitext(os.path.basename(castep_file))[0]
err_file = '%s.0001.err' % self._seed
if os.path.exists(err_file):
err_file = paropen(err_file)
self._error = err_file.read()
err_file.close()
# we return right-away because it might
# just be here from a previous run
# look for last result, if several CASTEP
# run are appended
out = paropen(castep_file, 'r')
record_start, record_end, end_found, _\
= self._castep_find_last_record(out)
if not end_found:
print("No regular end found in %s file" % castep_file)
print(self._error)
out.close()
return
# we return here, because the file has no regular end
# now iterate over last CASTEP output in file to extract information
# could be generalized as well to extract trajectory from file
# holding several outputs
n_cell_const = 0
forces = []
stress = []
out.seek(record_start)
while True:
try:
line = out.readline()
if not line or out.tell() > record_end:
break
elif "output verbosity" in line:
iprint = int(line.split()[-1][1])
if int(iprint) != 1:
self.param.iprint = iprint
elif "Unit Cell" in line:
lattice_real = []
lattice_reci = []
while True:
line = out.readline()
fields = line.split()
if len(fields) == 6:
break
for i in range(3):
lattice_real.append(map(float, fields[0:3]))
lattice_reci.append(map(float, fields[3:7]))
line = out.readline()
fields = line.split()
elif "Cell Contents" in line:
while True:
line = out.readline()
if "Total number of ions in cell" in line:
n_atoms = int(line.split()[7])
if "Total number of species in cell" in line:
_ = int(line.split()[7])
fields = line.split()
if len(fields) == 0:
break
elif "Fractional coordinates of atoms" in line:
species = []
positions_frac = []
# positions_cart = []
while True:
line = out.readline()
fields = line.split()
if len(fields) == 7:
break
for n in range(n_atoms):
species.append(fields[1])
positions_frac.append(map(float, fields[3:6]))
line = out.readline()
fields = line.split()
elif "Files used for pseudopotentials" in line:
while True:
line = out.readline()
if 'Pseudopotential generated on-the-fly' in line:
continue
fields = line.split()
if (len(fields) >= 2):
elem, pp_file = fields
self.cell.species_pot = (elem, pp_file)
else:
break
elif "k-Points For BZ Sampling" in line:
# TODO: generalize for non-Monkhorst Pack case
# (i.e. kpoint lists) -
# kpoints_offset cannot be read this way and
# is hence always set to None
while True:
line = out.readline()
if not line.strip():
break
if "MP grid size for SCF calculation" in line:
#kpoints = ' '.join(line.split()[-3:])
#self.kpoints_mp_grid = kpoints
#self.kpoints_mp_offset = '0. 0. 0.'
# not set here anymore because otherwise
# two calculator objects go out of sync
# after each calculation triggering unecessary
# recalculation
break
elif "Symmetry and Constraints" in line:
self.read_symops(castep_castep=out)
elif "Number of cell constraints" in line:
n_cell_const = int(line.split()[4])
elif "Final energy" in line:
self._energy_total = float(line.split()[-2])
elif "Final free energy" in line:
self._energy_free = float(line.split()[-2])
elif "NB est. 0K energy" in line:
self._energy_0K = float(line.split()[-2])
# remember to remove constraint labels in force components
# (lacking a space behind the actual floating point number in
# the CASTEP output)
elif "******************** Forces *********************"\
in line or\
"************** Symmetrised Forces ***************"\
in line:
fix = []
fix_cart = []
forces = []
while True:
line = out.readline()
fields = line.split()
if len(fields) == 7:
break
for n in range(n_atoms):
consd = np.array([0, 0, 0])
fxyz = [0, 0, 0]
for (i, force_component) in enumerate(fields[-4:-1]):
if force_component.count("(cons'd)") > 0:
consd[i] = 1
fxyz[i] = float(force_component.replace(
"(cons'd)", ""))
if consd.all():
fix.append(n)
elif consd.any():
fix_cart.append(FixCartesian(n, consd))
forces.append(fxyz)
line = out.readline()
fields = line.split()
elif "***************** Stress Tensor *****************"\
in line:
stress = []
while True:
line = out.readline()
fields = line.split()
if len(fields) == 6:
break
for n in range(3):
stress.append(map(float, fields[2:5]))
line = out.readline()
fields = line.split()
elif "BFGS: starting iteration" in line \
or "BFGS: improving iteration" in line:
if n_cell_const < 6:
lattice_real = []
lattice_reci = []
species = []
positions_frac = []
#positions_cart = []
forces = []
stress = []
self._stress = stress
elif "BFGS: Final Configuration:" in line:
break
elif 'warn' in line.lower():
self._warnings.append(line)
except Exception, exception:
print line,
print "|-> line triggered exception: " + str(exception)
raise
out.close()
positions_frac_atoms = np.array(positions_frac)
forces_atoms = np.array(forces)
if self.atoms and not self._set_atoms:
# compensate for internal reordering of atoms by CASTEP
# to check if all atoms are assigned
atoms_assigned = [False] * len(self.atoms)
positions_frac_ase = self.atoms.get_scaled_positions()
positions_frac_castep = (np.array(positions_frac) % 1) % 1
# % is necessary because CASTEP output may contain fractional
# coordinates > 1, which does not affect the calculation though
# Source: http://goo.gl/xfwri
# And, yes, the % needs to be done twice, see
# ase.atoms.Atoms.get_scaled_positions
species_castep = list(species)
forces_castep = np.array(forces)
tolerance = 1E-5
for n in range(n_atoms):
for m in range(n_atoms):
if (np.linalg.norm(positions_frac_ase[n] \
- positions_frac_castep[m], 1) < tolerance):
if atoms_assigned[n]:
raise UserWarning('Castep().read() tried to' + \
' assign forces twice to the same' + \
' atom.\n Please file a bug report to %s' + \
' and attach your input files.' \
% contact_email)
species[n] = species_castep[m]
positions_frac_atoms[n] = \
np.array(positions_frac_castep[m])
forces_atoms[n] = np.array(forces_castep[m])
atoms_assigned[n] = True
if not all(atoms_assigned):
print('%s atoms not assigned.' % atoms_assigned.count(False))
print('The following list is True for all assigned atoms: %s'\
% atoms_assigned)
print('If you are trying to read a .castep where the atom\'s')
print('positions have changed with respect to the atoms')
print('object, set calc._set_atoms = True\n')
print('On the other hand _set_atoms = True is not')
print('recommended if CASTEP is only used as a single-point')
print('calculator (e.g. in an ASE geometry optimzation)')
print('as this might cause redundant recalculations.')
raise UserWarning('Castep().read() did not assign forces' + \
' and positions to all input atoms\n' + \
'If you think it should have assigned all of them,' + \
' please file a bug report with your input file(s)' + \
'to\n\n\t%s' % contact_email)
else:
# If no atoms, object has been previously defined
# we define it here and set the Castep() instance as calculator.
# This covers the case that we simply want to open a .castep file.
# The next time around we will have an atoms object, since
# set_calculator also set atoms in the calculator.
if self.atoms:
constraints = self.atoms.constraints
else:
constraints = []
atoms = ase.atoms.Atoms(species,
cell=lattice_real,
constraint=constraints,
pbc=True,
scaled_positions=positions_frac,
)
atoms.set_calculator(self)
self._forces = forces_atoms
if self._warnings:
print("WARNING: %s contains warnings" % castep_file)
for warning in self._warnings:
print(warning)
# reset
self._warnings = []
# TODO: check that this is really backwards compatible
# with previous routine with this name...
def read_symops(self, castep_castep=None):
"""Read all symmetry operations used from a .castep file."""
if castep_castep is None:
castep_castep = self._seed + ".castep"
if isinstance(castep_castep, str):
if not os.path.isfile(castep_castep):
print('Warning: CASTEP file %s not found!' % castep_castep)
f = paropen(castep_castep, 'r')
while True:
line = f.readline()
if not line:
return
if "output verbosity" in line:
iprint = line.split()[-1][1]
# filter out the default
if int(iprint) != 1:
self.param.iprint = iprint
if "Symmetry and Constraints" in line:
break
elif isinstance(castep_castep, file):
f = castep_castep
else:
raise TypeError('read_castep_castep_symops: castep_castep is' \
+ 'not of type file or str!')
if self.param.iprint is None or self.param.iprint < 2:
self._interface_warnings.append('Warning: No symmetry' \
+ 'operations could be read from %s (iprint < 2).' % f.name)
return
while True:
line = f.readline()
if not line:
break
if "Number of symmetry operations" in line:
nsym = int(line.split()[5])
# print "nsym = %d" % nsym
# information about symmetry related atoms currently not read
symmetry_operations = []
for _ in range(nsym):
rotation = []
displacement = []
while True:
if "rotation" in f.readline():
break
for _ in range(3):
line = f.readline()
rotation.append(map(float, line.split()[1:4]))
while True:
if "displacement" in f.readline():
break
line = f.readline()
displacement = map(float, line.split()[1:4])
symop = {'rotation': rotation,
'displacement': displacement}
self.symmetry_ops = symop
self.symmetry = symmetry_operations
print "Symmetry operations successfully read from %s" % f.name
print self.cell.symmetry_ops
break
if isinstance(castep_castep, str):
f.close()
# return self.symmetry
def set_label(self, label):
"""The label is part of each seed, which in turn is a prefix
in each CASTEP related file.
"""
self._label = label
def set_pspot(self, pspot, elems=None, notelems=None, clear=True):
"""Quickly set all pseudo-potentials: Usually CASTEP psp are named
like <Elem>_<LibraryName>.usp so this function only expects
the <LibraryName>. It then clears any previous pseudopotential
settings and applies the one with <LibraryName> for each element in the
atoms object. The optional elems and notelems arguments can be used
to assign it exclusively to some species, or to exclude some with notelems.
"""
if clear and not elems and not notelems:
self.cell.species_pot.clear()
for elem in set(self.atoms.get_chemical_symbols()):
if elems is not None and elem not in elems:
continue
if notelems is not None and elem in notelems:
continue
self.cell.species_pot = (elem, '%s_%s.usp' % (elem, pspot))
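# Illustrative example ('00PBE' is a hypothetical library name used only to
# show the naming scheme <Elem>_<LibraryName>.usp described above):
#
#     calc.set_pspot('00PBE')
#     # for a SiO2 structure this would request Si_00PBE.usp and O_00PBE.usp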
def get_forces(self, atoms):
"""Run CASTEP calculation if needed and return forces."""
self.update(atoms)
return np.array(self._forces)
def get_total_energy(self, atoms):
"""Run CASTEP calculation if needed and return total energy."""
self.update(atoms)
return self._energy_total
def get_free_energy(self, atoms):
"""Run CASTEP calculation if needed and return free energy.
Only defined with smearing."""
self.update(atoms)
return self._energy_free
def get_0K_energy(self, atoms):
"""Run CASTEP calculation if needed and return 0K energy.
Only defined with smearing."""
self.update(atoms)
return self._energy_0K
#here for compatability with ase/calculators/general.py
#but accessing only _name variables
def get_potential_energy(self, atoms, force_consistent=False):
"""Return the total potential energy."""
self.update(atoms)
if force_consistent:
return self._energy_free
else:
if self._energy_0K is not None:
return self._energy_0K
else:
return self._energy_total
def get_stress(self, atoms):
"""Return the stress."""
self.update(atoms)
return self._stress
def get_unit_cell(self, atoms):
"""Return the unit cell."""
self.update(atoms)
return self._unit_cell
def get_kpoints(self, atoms):
"""Return the kpoints."""
self.update(atoms)
return self._kpoints
def get_number_cell_constraints(self, atoms):
"""Return the number of cell constraints."""
self.update(atoms)
return self._number_of_cell_constraints
def set_atoms(self, atoms):
"""Sets the atoms for the calculator and vice versa."""
atoms.pbc = [True, True, True]
self.__dict__['atoms'] = atoms.copy()
self.atoms._calc = self
def update(self, atoms):
"""Checks if atoms object or calculator changed and
runs calculation if so.
"""
if self.calculation_required(atoms):
self.calculate(atoms)
def calculation_required(self, atoms, _=None):
"""Checks wether anything changed in the atoms object or CASTEP
settings since the last calculation using this instance.
"""
if not self.atoms == self._old_atoms:
return True
if self._old_param is None or self._old_cell is None:
return True
if not self.param._options == self._old_param._options:
return True
if not self.cell._options == self._old_cell._options:
return True
return False
def calculate(self, atoms):
"""Write all necessary input file and call CASTEP."""
self.prepare_input_files(atoms, force_write=self._force_write)
if not self._prepare_input_only:
self.run()
self.read()
def push_oldstate(self):
"""This function pushes the current state of the (CASTEP) Atoms object
onto the previous state. In other words, after calling this function,
calculation_required will return False and enquiry functions just
report the current value, e.g. get_forces(), get_potential_energy().
"""
# make a snapshot of all current input
# to be able to test if recalculation
# is necessary
self._old_atoms = self.atoms.copy()
self._old_param = deepcopy(self.param)
self._old_cell = deepcopy(self.cell)
def initialize(self, *args, **kwargs):
"""Just an alias for prepar_input_files to comply with standard
function names in ASE.
"""
self.prepare_input_files(*args, **kwargs)
def prepare_input_files(self, atoms=None, force_write=None):
"""Only writes the input .cell and .param files and return
This can be useful if one quickly needs to prepare input files
for a cluster where no python or ASE is available. One can than
upload the file manually and read out the results using
Castep().read().
"""
if self.param.reuse.value is None:
print("You have not set e.g. calc.param.reuse = True")
print("Reusing a previous calculation saves a lot of CPU time!\n")
print("The interface will make sure, that there is .check")
print("file before adding this statement to the .param file.\n")
if self.param.num_dump_cycles.value is None:
print("You have not set e.g. calc.param.num_dump_cycles = 0.")
print("This can save a lot of disk space. One only needs *wvfn*")
print("if electronic convergence isn't achieved in one go.\n")
from ase.io.castep import write_param
if atoms is None:
atoms = self.atoms
else:
self.atoms = atoms
if force_write is None:
force_write = self._force_write
# if we have new instance of the calculator,
# move existing results out of the way, first
if os.path.isdir(self._directory)\
and self._calls == 0 \
and self._rename_existing_dir:
if os.listdir(self._directory) == []:
os.rmdir(self._directory)
else:
# rename appending creation date of the directory
ctime = time.localtime(os.lstat(self._directory).st_ctime)
os.rename(self._directory, '%s.bak-%s'
% (self._directory, time.strftime("%Y%m%d-%H%M%S", ctime)))
# create work directory
if not os.path.isdir(self._directory):
os.mkdir(self._directory, 0775)
if self._calls == 0:
self._fetch_pspots()
cwd = os.getcwd()
os.chdir(self._directory)
# if _try_reuse is requested and this
# is not the first run, we try to find
# the .check file from the previous run
# this is only necessary if _track_output
# is set to true
if self._try_reuse and self._calls > 0:
if os.path.exists(self._check_file):
self.param.reuse = self._check_file
elif os.path.exists(self._castep_bin_file):
self.param.reuse = self._castep_bin_file
self._seed = self._build_castep_seed()
self._check_file = '%s.check' % self._seed
self._castep_bin_file = '%s.castep_bin' % self._seed
self._castep_file = os.path.abspath('%s.castep' % self._seed)
# write out the input file
self._write_cell('%s.cell' % self._seed,
self.atoms, force_write=force_write)
if self._export_settings:
interface_options = self._opt
else:
interface_options = None
write_param('%s.param' % self._seed, self.param,
check_checkfile=True,
force_write=force_write,
interface_options=interface_options,)
os.chdir(cwd)
def _build_castep_seed(self):
"""Abstracts to construction of the final castep <seed>
with and without _tracking_output.
"""
if self._track_output:
return "%s-%06d" % (self._label, self._calls)
else:
return "%s" % (self._label)
def run(self):
"""Simply call castep. If the first .err file
contains text, this will be printed to the screen.
"""
# change to target directory
cwd = os.getcwd()
os.chdir(self._directory)
self._calls += 1
# run castep itself
stdout, stderr = shell_stdouterr('%s %s' % (self._castep_command, self._seed))
if stdout:
print('castep call stdout:\n%s' % stdout)
if stderr:
print('castep call stderr:\n%s' % stderr)
self.push_oldstate()
# check for non-empty error files
err_file = '%s.0001.err' % self._seed
if os.path.exists(err_file):
err_file = open(err_file)
self._error = err_file.read()
err_file.close()
os.chdir(cwd)
if self._error:
print(self._error)
def __repr__(self):
"""Returns generic, fast to capture representation of
CASTEP settings along with atoms object.
"""
expr = ''
expr += '-----------------Atoms--------------------\n'
if self.atoms is not None:
expr += str('%20s\n' % self.atoms)
else:
expr += 'None\n'
expr += '-----------------Param keywords------------\n'
expr += str(self.param)
expr += '-----------------Cell keywords------------\n'
expr += str(self.cell)
expr += '-----------------Internal keys------------\n'
for key in self.internal_keys:
expr += '%20s : %s\n' % (key, self._opt[key])
return expr
def __getattr__(self, attr):
"""___getattr___ gets overloaded to reroute the internal keys
and to be able to easily store them in in the param so that
they can be read in again in subsequent calls.
"""
if attr in self.internal_keys:
return self._opt[attr]
if attr in ['__repr__', '__str__']:
raise AttributeError
elif attr not in self.__dict__:
raise AttributeError
else:
return self.__dict__[attr]
def __setattr__(self, attr, value):
"""We overload the settattr method to make value assignment
as pythonic as possible. Internal values all start with _.
Value assigment is case insensitive!
"""
if attr.startswith('_'):
# internal variables all start with _
# let's check first if they are close but not identical
# to one of the switches, that the user accesses directly
similars = difflib.get_close_matches(attr, self.internal_keys,
cutoff=0.9)
if attr not in self.internal_keys and similars:
print('Warning: You probably tried one of: %s' % similars)
print('but typed %s' % attr)
if attr in self.internal_keys:
self._opt[attr] = value
if attr == '_track_output':
if value:
self._try_reuse = True
print('You switched _track_output on. This will')
print('consume a lot of disk-space. The interface')
print('also switched _try_reuse on, which will')
print('try to find the last check file. Set')
print('_try_reuse = False, if you need')
print('really separate calculations')
elif '_try_reuse' in self._opt and self._try_reuse:
self._try_reuse = False
print("_try_reuse is set to False, too")
else:
self.__dict__[attr] = value
return
elif attr in ['atoms', 'cell', 'param']:
if value is not None:
if attr == 'atoms' and not isinstance(value, ase.atoms.Atoms):
raise TypeError('%s is not an instance of ase.atoms.Atoms.'
% value)
elif attr == 'cell' and not isinstance(value, CastepCell):
raise TypeError('%s is not an instance of CastepCell.'
% value)
elif attr == 'param' and not isinstance(value, CastepParam):
raise TypeError('%s is not an instance of CastepParam.'
% value)
# These 3 are accepted right-away, no matter what
self.__dict__[attr] = value
return
elif attr in self.atoms_obj_keys:
# keywords which clearly belong to the atoms object are
# rerouted to go there
self.atoms.__dict__[attr] = value
return
elif attr in self.atoms_keys:
# CASTEP keywords that should go into the atoms object
# itself are blocked
print(("Ignoring setings of '%s', since this has to be set\n" +
"through the atoms object") % attr)
return
attr = attr.lower()
if attr not in (self.cell._options.keys()\
+ self.param._options.keys()):
# what is left now should be meant to be a castep keyword
# so we first check if it is defined, and if not, offer some error
# correction
similars = difflib.get_close_matches(attr,
self.cell._options.keys() + self.param._options.keys())
if similars:
raise UserWarning(('Option "%s" not known! You mean "%s"?')
% (attr, similars[0]))
else:
raise UserWarning('Option "%s" is not known!' % attr)
# here we know it must go into one of the component param or cell
# so we first determine which one
if attr in self.param._options.keys():
comp = 'param'
elif attr in self.cell._options.keys():
comp = 'cell'
else:
raise UserWarning('Programming error: could not attach ' \
+ 'the keyword to an input file')
self.__dict__[comp].__setattr__(attr, value)
def merge_param(self, param, overwrite=True, ignore_internal_keys=False):
"""Parse a param file and merge it into the current parameters."""
INT_TOKEN = 'ASE_INTERFACE'
if isinstance(param, CastepParam):
for key, option in param._options.iteritems():
if option.value is not None:
self.param.__setattr__(key, option.value)
return
elif type(param) is str:
param_file = open(param, 'r')
elif type(param) is file:
param_file = param
else:
print("The param filename is neither a string nor a filehandler")
return
for i, line in enumerate(param_file.readlines()):
line = line.strip()
# remove comments
for comment_char in ['#', ';', '!']:
if comment_char in line:
if INT_TOKEN in line:
# This block allows to read internal settings from
# a *param file
iline = line[line.index(INT_TOKEN) + len(INT_TOKEN):]
if iline.split()[0] in self.internal_keys \
and not ignore_internal_keys:
value = ' '.join(iline.split()[2:])
if value in ['True', 'False']:
self._opt[iline.split()[0]] = eval(value)
else:
self._opt[iline.split()[0]] = value
line = line[:line.index(comment_char)]
# if nothing remains
if not line.strip():
continue
line = re.sub(':', ' ', line)
if line == 'reuse':
self.param.reuse.value = 'default'
continue
if line == 'continuation':
self.param.continuation.value = 'default'
continue
try:
key, value = line.split()
except:
print("Could not parse line %s of your param file: %s"
% (i, line))
raise UserWarning("Seems to me malformed")
if not overwrite and getattr(self.param, key).value is not None:
continue
self.__setattr__(key, value)
def dryrun_ok(self, dryrun_flag='-dryrun'):
"""Starts a CASTEP run with the -dryrun flag [default]
in a temporary directory and checks whether all variables are initialized
correctly. This is recommended for every bigger simulation.
"""
from ase.io.castep import write_param
temp_dir = tempfile.mkdtemp()
curdir = os.getcwd()
self._fetch_pspots(temp_dir)
os.chdir(temp_dir)
self._fetch_pspots(temp_dir)
seed = 'dryrun'
cell_written = self._write_cell('%s.cell' % seed, self.atoms)
if not cell_written:
print "%s.cell not written - aborting dryrun" % seed
return
write_param('%s.param' % seed, self.param, )
stdouterr = shell_stdouterr(('%s %s %s' % (self._castep_command,
seed,
dryrun_flag)))
if stdouterr:
print(stdouterr)
result_file = open('%s.castep' % seed)
txt = result_file.read()
ok_string = r'.*DRYRUN finished.*No problems found with input files.*'
match = re.match(ok_string, txt, re.DOTALL)
try:
self._kpoints_used = int(
re.search(
r'Number of kpoints used = *([0-9]+)', txt).group(1))
except:
print('Couldn\'t fetch number of kpoints from dryrun CASTEP file')
err_file = '%s.0001.err' % seed
if match is None and os.path.exists(err_file):
err_file = open(err_file)
self._error = err_file.read()
err_file.close()
result_file.close()
os.chdir(curdir)
shutil.rmtree(temp_dir)
# re.match returns None if the string does not match
return match is not None
# this could go into the Atoms() class at some point...
def _get_number_in_species(self, at, atoms=None):
"""Return the number of the atoms within the set of it own
species. If you are an ASE commiter: why not move this into
ase.atoms.Atoms ?"""
if atoms is None:
atoms = self.atoms
numbers = atoms.get_atomic_numbers()
n = numbers[at]
nis = numbers.tolist()[:at + 1].count(n)
return nis
def _get_absolute_number(self, species, nic, atoms=None):
"""This is the inverse function to _get_number in species."""
if atoms is None:
atoms = self.atoms
ch = atoms.get_chemical_symbols()
ch.reverse()
total_nr = 0
assert nic > 0, 'Number in species needs to be 1 or larger'
while True:
if ch.pop() == species:
if nic == 1:
return total_nr
nic -= 1
total_nr += 1
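    # A small worked example (values are hypothetical, not part of the API):
    # for an Atoms object with chemical symbols ['H', 'O', 'H'],
    # _get_number_in_species(2) returns 2 (the atom at index 2 is the second
    # hydrogen), and _get_absolute_number('H', 2) returns 2 again, i.e. the
    # two helpers are inverses of each other.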
def _fetch_pspots(self, directory=None):
"""Print all specified pseudo-potentials into the working directory.
"""
if directory is None:
directory = self._directory
if not os.path.isdir(self._castep_pp_path):
print("PSPs directory %s not found" % self._castep_pp_path)
pspots = {}
if self.cell.species_pot.value is not None:
for line in self.cell.species_pot.value.split('\n'):
line = line.split()
if line:
pspots[line[0]] = line[1]
for species in self.atoms.get_chemical_symbols():
if not pspots or species not in pspots.keys():
print("Warning: you have no PP specified for %s." % species)
print("CASTEP will now generate an on-the-fly potentials.")
print("For sake of numerical consistency and efficiency")
print("this is discouraged.")
if self.cell.species_pot.value:
for (species, pspot) in pspots.iteritems():
orig_pspot_file = os.path.join(self._castep_pp_path, pspot)
cp_pspot_file = os.path.join(directory, pspot)
if os.path.exists(orig_pspot_file)\
and not os.path.exists(cp_pspot_file):
if self._copy_pspots:
shutil.copy(orig_pspot_file, directory)
else:
os.symlink(os.path.join(self._castep_pp_path, pspot),
cp_pspot_file)
def get_castep_version(castep_command):
"""This returns the version number as printed in the CASTEP banner.
"""
temp_dir = tempfile.mkdtemp()
curdir = os.getcwd()
os.chdir(temp_dir)
jname = 'dummy_jobname'
stdout, stderr = "", ""
try:
stdout, stderr = subprocess.Popen(castep_command.split()
+ [jname, '-dryrun'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE, cwd=temp_dir).communicate()
except:
msg = ""
msg += "Could not determine the version of your CASTEP binary \n"
msg += "This usually means one of the following \n"
msg += " * you don't have CASTEP installed \n"
msg += " * you have not set the CASTEP_COMMAND to call it \n"
msg += " * you have provided a wrong CASTEP_COMMAND. \n"
msg += " Make sure it is in your PATH\n\n"
msg += stdout
msg += stderr
raise Exception(msg)
output = open('%s.castep' % jname)
output_txt = output.readlines()
output.close()
os.chdir(curdir)
shutil.rmtree(temp_dir)
for line in output_txt:
if 'CASTEP version' in line:
return float(re.findall(r'(?<=CASTEP version )[0-9.]*', line)[0])
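# Hedged usage sketch (assumes a working serial CASTEP binary; the call
# shells out and performs a dryrun, so only run it interactively):
#
#     version = get_castep_version(get_castep_command())
#     print('Found CASTEP %s' % version)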
def create_castep_keywords(castep_command, filename='castep_keywords.py',
force_write=True, path='.', fetch_only=None):
"""This function allows to fetch all available keywords from stdout
of an installed castep binary. It furthermore collects the documentation
to harness the power of (ipython) inspection and type for some basic
type checking of input. All information is stored in two 'data-store'
objects that are not distributed by default to avoid breaking the license
of CASTEP.
"""
# Takes a while ...
# Fetch all allowed parameters
# fetch_only : only fetch that many parameters (for testsuite only)
code = {}
suffixes = ['cell', 'param']
for suffix in suffixes:
code[suffix] = ''
if os.path.exists(filename) and not force_write:
print('CASTEP Options Module file exists.')
print('You can overwrite it by calling')
print('python castep.py -f [CASTEP_COMMAND].')
return False
fh = open(os.path.join(path, filename), 'w')
fh.write('"""This file is generated by')
fh.write('ase/calculators/castep.py\n')
fh.write('and is not distributed with ASE to avoid breaking')
fh.write('CASTEP copyright\n"""\n')
fh.write('class Opt:\n')
fh.write(' """"A CASTEP option"""\n')
fh.write(""" def __init__(self):
self.keyword = None
self.level = None
self.type = None
self.value = None
def clear(self):
\"\"\"Reset the value of the option to None again\"\"\"
self.value = None\n""")
fh.write(' def __repr__(self):\n')
fh.write(' expr = \'\'\n')
fh.write(' if self.value:\n')
fh.write(' expr += \'Option: %s(%s, %s):\\n%s\\n\''\
+ '% (self.keyword, self.type, self.level, self.value)\n')
fh.write(' else:\n')
fh.write(' expr += \'Option: %s[unset]\' % self.keyword\n')
fh.write(' expr += \'(%s, %s)\' % (self.type, self.level)\n')
fh.write(' return expr\n\n')
fh.write("""class ComparableDict(dict):
\"\"\"Extends a dict to make to sets of options comparable\"\"\"
def __init__(self):
dict.__init__(self)
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
if not isinstance(other, ComparableDict):
return False
if set(self) - set(other):
return False
for key in sorted(self):
if self[key].value != other[key].value:
return False
return True\n""")
code['cell'] += '\n\nclass CastepCellDict(object):\n'
code['param'] += '\n\nclass CastepParamDict(object):\n'
types = []
levels = []
for suffix in suffixes:
code[suffix] += ' """A flat object that holds %s options"""\n'\
% suffix
code[suffix] += ' def __init__(self):\n'
code[suffix] += ' object.__init__(self)\n'
code[suffix] += ' self._options = ComparableDict()\n'
castep_version = get_castep_version(castep_command)
help_all, _ = shell_stdouterr('%s -help all' % castep_command)
# Filter out proper keywords
try:
raw_options = re.findall(r'(?<=^ )[A-Z_]+', help_all, re.MULTILINE)
except:
print('Problem parsing: %s' % help_all)
raise
processed_options = 0
for option in raw_options[:fetch_only]:
doc, _ = shell_stdouterr('%s -help %s' % (castep_command, option))
# Stand Back! I know regular expressions (http://xkcd.com/208/) :-)
match = re.match(r'(?P<before_type>.*)Type: (?P<type>[^ ]+).*' + \
r'Level: (?P<level>[^ ]+)\n\s*\n' + \
r'(?P<doc>.*?)(\n\s*\n|$)', doc, re.DOTALL)
if match is not None:
match = match.groupdict()
processed_options += 1
if re.findall(r'PARAMETERS keywords:\n\n None found', doc):
suffix = 'cell'
else:
suffix = 'param'
sys.stdout.write('.')
sys.stdout.flush()
code[suffix] += ' opt_obj = Opt()\n'
code[suffix] += (' opt_obj.keyword = \'%s\'\n'
% option.lower())
if 'type' in match:
code[suffix] += (' opt_obj.type = \'%s\'\n'
% match['type'])
if match['type'] not in types:
types.append(match['type'])
else:
raise Exception('Found no type for %s' % option)
if 'level' in match:
code[suffix] += (' opt_obj.level = \'%s\'\n'
% match['level'])
if match['level'] not in levels:
levels.append(match['level'])
else:
raise Exception('Found no level for %s' % option)
if 'doc' in match:
code[suffix] += (' opt_obj.__doc__ = """%s\n"""\n'
% match['doc'])
else:
raise Exception('Found no doc string for %s' % option)
code[suffix] += (' opt_obj.value = None\n')
code[suffix] += (' self._options[\'%s\'] = opt_obj\n\n'
% option.lower())
code[suffix] += (' self.__dict__[\'%s\'] = opt_obj\n\n'
% option.lower())
else:
sys.stdout.write(doc)
sys.stdout.flush()
raise Exception('create_castep_keywords: Could not process %s'
% option)
# write classes out
for suffix in suffixes:
fh.write(code[suffix])
fh.write('types = %s\n' % types)
fh.write('levels = %s\n' % levels)
fh.write('castep_version = %s\n\n' % castep_version)
fh.close()
print('\nCASTEP v%s, fetched %s keywords'
% (castep_version, processed_options))
return True
class CastepParam(object):
"""CastepParam abstracts the settings that go into the .param file"""
def __init__(self):
object.__init__(self)
castep_keywords = import_castep_keywords()
castep_param_dict = castep_keywords.CastepParamDict()
self._options = castep_param_dict._options
self.__dict__.update(self._options)
def __repr__(self):
expr = ''
if filter(lambda x: x.value is not None, self._options.values()):
for key, option in sorted(self._options.iteritems()):
if option.value is not None:
expr += ("%20s : %s\n" % (key, option.value))
else:
expr += 'Default\n'
return expr
def __setattr__(self, attr, value):
if attr.startswith('_'):
self.__dict__[attr] = value
return
if attr not in self._options.keys():
similars = difflib.get_close_matches(attr, self._options.keys())
if similars:
raise UserWarning(('Option "%s" not known! You mean "%s"?')
% (attr, similars[0]))
else:
raise UserWarning('Option "%s" is not known!' % attr)
attr = attr.lower()
opt = self._options[attr]
if not opt.type == 'Block' and type(value) is str:
value = value.replace(':', ' ')
if opt.type in ['Boolean', 'Defined']:
if False:
pass
else:
try:
value = bool(eval(str(value).title()))
except:
raise ConversionError('bool', attr, value)
self._options[attr].value = value
elif opt.type == 'String':
if attr == 'reuse':
if self._options['continuation'].value:
print('Cannot set reuse if continuation is set, and')
print('vice versa. Set the other to None, if you want')
print('this setting.')
else:
if value is True:
self._options['reuse'].value = 'default'
else:
self._options['reuse'].value = str(value)
elif attr == 'continuation':
if self._options['reuse'].value:
print('Cannot set continuation if reuse is set, and')
print('vice versa. Set the other to None, if you want')
print('this setting.')
else:
if value is True:
self._options['continuation'].value = 'default'
else:
self._options['continuation'].value = str(value)
else:
try:
value = str(value)
except:
raise ConversionError('str', attr, value)
self._options[attr].value = value
elif opt.type == 'Integer':
if False:
pass
else:
try:
value = int(value)
except:
raise ConversionError('int', attr, value)
self._options[attr].value = value
elif opt.type in ['Real', 'Physical']:
# Usage of the CASTEP unit system is not implemented for now.
# We assume that the user is happy with setting/getting the
# CASTEP default units; refer to http://goo.gl/bqYf2,
# page 13, accessed Apr 6, 2011.
try:
value = float(value)
except:
raise ConversionError('float', attr, value)
self._options[attr].value = value
# So far there is no block type in .param
else:
raise RuntimeError("Caught unhandled option: %s = %s"
% (attr, value))
class CastepCell(object):
"""CastepCell abstracts all setting that go into the .cell file"""
def __init__(self):
object.__init__(self)
castep_keywords = import_castep_keywords()
castep_cell_dict = castep_keywords.CastepCellDict()
self._options = castep_cell_dict._options
self.__dict__.update(self._options)
def __repr__(self):
expr = ''
if filter(lambda x: x.value is not None, self._options.values()):
for key, option in sorted(self._options.iteritems()):
if option.value is not None:
expr += ("%20s : %s\n" % (key, option.value))
else:
expr += 'Default\n'
return expr
def __setattr__(self, attr, value):
if attr.startswith('_'):
self.__dict__[attr] = value
return
if attr not in self._options.keys():
similars = difflib.get_close_matches(attr, self._options.keys())
if similars:
raise UserWarning(('Option "%s" not known! You mean "%s"?')
% (attr, similars[0]))
else:
raise UserWarning('Option "%s" is not known!' % attr)
attr = attr.lower()
opt = self._options[attr]
if not opt.type == 'Block' and type(value) is str:
value = value.replace(':', ' ')
if opt.type in ['Boolean', 'Defined']:
try:
value = bool(eval(str(value).title()))
except:
raise ConversionError('bool', attr, value)
self._options[attr].value = value
elif opt.type == 'String':
if False:
pass
else:
try:
value = str(value)
except:
raise ConversionError('str', attr, value)
self._options[attr].value = value
elif opt.type == 'Integer':
if attr == 'kpoint_mp_grid':
opt = self._options['kpoints_mp_grid']
if attr in ['kpoints_mp_grid', 'kpoint_mp_grid']:
if ',' in value:
value = value.replace(',', ' ')
if type(value) is str and len(value.split()) == 3:
try:
_ = [int(x) for x in value.split()]
except:
raise ConversionError('int', attr, value)
opt.value = value
else:
print('Wrong format for kpoints_mp_grid: expected R R R')
print('and you said %s' % value)
else:
try:
value = int(value)
except:
raise ConversionError('int', attr, value)
self._options[attr].value = value
elif opt.type in ['Real', 'Physical']:
if attr == 'kpoint_mp_offset':
opt = self._options['kpoints_mp_offset']
if attr in ['kpoints_mp_offset', 'kpoint_mp_offset']:
if type(value) is str and len(value.split()) == 3:
try:
_ = [float(x) for x in value.split()]
except:
raise ConversionError('float', attr, value)
opt.value = value
else:
try:
value = float(value)
except:
raise ConversionError('float', attr, value)
self._options[attr].value = value
elif opt.type == 'Block':
if attr == 'species_pot':
if type(value) is not tuple \
or len(value) != 2:
print("Please specify pseudopotentials in python as")
print("a tuple, like:")
print("(species, file), e.g. ('O', 'path-to/O_OTFG.usp')")
print("Anything else will be ignored")
else:
if self.__dict__['species_pot'].value is None:
self.__dict__['species_pot'].value = ''
self.__dict__['species_pot'].value = \
re.sub(r'\n?\s*%s\s+.*' % value[0], '',
self.__dict__['species_pot'].value)
if value[1]:
self.__dict__['species_pot'].value += '\n%s %s' \
% value
# now sort lines as to match the CASTEP output
pspots = self.__dict__['species_pot'].value.split('\n')
# throw out empty lines
pspots = filter(lambda x: x, pspots)
# sort based on atomic numbers
pspots.sort(key=lambda x: ase.data.atomic_numbers[
x.split()[0]])
# rejoin; the first blank-line
# makes the print(calc) output look prettier
self.__dict__['species_pot'].value = \
'\n' + '\n'.join(pspots)
return
elif attr == 'symmetry_ops':
if type(value) is not dict \
        or 'rotation' not in value \
        or 'displacement' not in value \
        or not len(value['rotation']) == 3 \
        or not len(value['displacement']) == 3:
    print("Cannot process your symmetry_op %s" % value)
    print("It has to be stated like {'rotation': [a, b, c], ")
    print("                          'displacement': [x, y, z]}")
return
if self.__dict__['symmetry_ops'].value is None:
self.__dict__['symmetry_ops'].value = ''
n = (len(self.__dict__['symmetry_ops'].value.split('\n'))
/ 4) + 1
for i in range(3):
self.__dict__['symmetry_ops'].value += \
(("%9.6f " * 3 + "! rotation %5d\n")\
% (tuple(value['rotation'][i] + (n, ))))
self.__dict__['symmetry_ops'].value\
+= (("%9.6f " * 3 + "! displacement %5d \n")\
% (tuple(value['displacement'] + (n, ))))
elif attr in ['positions_abs_intermediate',
'positions_abs_product']:
if not isinstance(value, ase.atoms.Atoms):
raise UserWarning('castep.cell.%s expects Atoms object'
% attr)
target = self.__dict__[attr]
target.value = ''
for elem, pos in zip(value.get_chemical_symbols(),
value.get_positions()):
target.value += ('%4s %9.6f %9.6f %9.6f\n' % (elem,
pos[0],
pos[1],
pos[2]))
return
elif attr in ['cell_constraints']:
# put block type options here, that don't need special care
try:
value = str(value)
except:
raise ConversionError('str', attr, value)
else:
print('Not implemented')
print('The option %s is of block type, which usually' % attr)
print('needs some special care to get the formatting right.')
print('Please feel free to add it and send the')
print('patch to %s, so we can all benefit.' % contact_email)
raise NotImplementedError()
self._options[attr].value = value
else:
raise RuntimeError('Caught unhandled option: %s = %s'
% (attr, value))
class ConversionError(Exception):
"""Print customized error for options that are not converted correctly
and point out that they are maybe not implemented, yet"""
def __init__(self, key_type, attr, value):
Exception.__init__(self)
self.key_type = key_type
self.value = value
self.attr = attr
def __str__(self):
return "Could not convert %s = %s to %s\n" \
% (self.attr, self.value, self.key_type) \
+ "This means you either tried to set a value of the wrong\n"\
+ "type or this keyword needs some special care. Please feel\n"\
+ "to add it to the corresponding __setattr__ method and send\n"\
+ "the patch to max.hoffmann@tum.de, so we can all benefit."
def get_castep_pp_path(castep_pp_path=''):
"""Abstract the quest for a CASTEP PSP directory."""
if castep_pp_path:
return os.path.abspath(os.path.expanduser(castep_pp_path))
elif 'CASTEP_PP_PATH' in os.environ:
return os.environ['CASTEP_PP_PATH']
else:
return os.path.abspath('.')
def get_castep_command(castep_command=''):
"""Abstract the quest for a castep_command string."""
if castep_command:
return castep_command
elif 'CASTEP_COMMAND' in os.environ:
return os.environ['CASTEP_COMMAND']
else:
return 'castep'
def shell_stdouterr(raw_command):
"""Abstracts the standard call of the commandline, when
we are only interested in the stdout and stderr
"""
stdout, stderr = subprocess.Popen(raw_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True).communicate()
return stdout.strip(), stderr.strip()
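# For illustration only (assumes a POSIX shell is available):
#
#     out, err = shell_stdouterr('echo hello')
#     # out == 'hello', err == ''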
def import_castep_keywords():
try:
import castep_keywords
except ImportError:
print(""" Generating castep_keywords.py ... hang on.
The castep_keywords.py contains abstractions for CASTEP input
parameters (for both .cell and .param input files), including some
format checks and descriptions. The latter are extracted from the
internal online help facility of a CASTEP binary, which makes it easy
to keep the calculator synchronized with (different versions of)
the CASTEP code. Consequently, to avoid licensing issues (CASTEP is
distributed commercially by Accelrys), we consider it wise not to
provide castep_keywords.py in the first place.
""")
create_castep_keywords(get_castep_command())
print("""\n\n Stored castep_keywords.py in %s. Copy castep_keywords.py to your
ASE installation under ase/calculators for system-wide installation
""" % os.path.abspath(os.path.curdir))
import castep_keywords
return castep_keywords
if __name__ == '__main__':
print("When called directly this calculator will fetch all available")
print("keywords from the binarys help function into a castep_keywords.py")
print("in the current directory %s" % os.getcwd())
print("For system wide usage, it can be copied into an ase installation")
print("at ASE/calculators.\n")
print("This castep_keywords.py usually only needs to be generated once")
print("for a CASTEP binary/CASTEP version.")
import optparse
parser = optparse.OptionParser()
parser.add_option('-f', '--force-write', dest='force_write',
help='Force overwriting existing castep_keywords.py', default=False,
action='store_true')
(options, args) = parser.parse_args()
if args:
opt_castep_command = ''.join(args)
else:
opt_castep_command = ''
generated = create_castep_keywords(get_castep_command(opt_castep_command),
force_write=options.force_write)
if generated:
try:
execfile('castep_keywords.py')
except Exception, e:
print(e)
print("Ooops, something went wrong with the CASTEP keywords")
else:
print("Import works. Looking good!")
|
JConwayAWT/PGSS14CC
|
lib/python/multimetallics/ase/calculators/castep.py
|
Python
|
gpl-2.0
| 76,859
|
[
"ASE",
"CASTEP"
] |
73270e3d993fc5e618acceb640202e4cedca9defc9233ac1a99fc62fe2c9cfb9
|
"""Migration script to add status and error_message columns to the tool_shed_repository table."""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
import datetime
now = datetime.datetime.utcnow
# Need our custom types, but don't import anything else from model
from galaxy.model.custom_types import *
metadata = MetaData()
def upgrade(migrate_engine):
metadata.bind = migrate_engine
print __doc__
metadata.reflect()
ToolShedRepository_table = Table( "tool_shed_repository", metadata, autoload=True )
# Add the status column to the tool_shed_repository table.
col = Column( "status", TrimmedString( 255 ) )
try:
col.create( ToolShedRepository_table )
assert col is ToolShedRepository_table.c.status
except Exception, e:
print "Adding status column to the tool_shed_repository table failed: %s" % str( e )
# Add the error_message column to the tool_shed_repository table.
col = Column( "error_message", TEXT )
try:
col.create( ToolShedRepository_table )
assert col is ToolShedRepository_table.c.error_message
except Exception, e:
print "Adding error_message column to the tool_shed_repository table failed: %s" % str( e )
# Update the status column value for tool_shed_repositories to the default value 'Installed'.
cmd = "UPDATE tool_shed_repository SET status = 'Installed';"
try:
migrate_engine.execute( cmd )
except Exception, e:
print "Exception executing sql command: "
print cmd
print str( e )
# Update the status column for tool_shed_repositories that have been uninstalled.
cmd = "UPDATE tool_shed_repository SET status = 'Uninstalled' WHERE uninstalled;"
try:
migrate_engine.execute( cmd )
except Exception, e:
print "Exception executing sql command: "
print cmd
print str( e )
# Update the status column for tool_shed_repositories that have been deactivated.
cmd = "UPDATE tool_shed_repository SET status = 'Deactivated' where deleted and not uninstalled;"
try:
migrate_engine.execute( cmd )
except Exception, e:
print "Exception executing sql command: "
print cmd
print str( e )
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
ToolShedRepository_table = Table( "tool_shed_repository", metadata, autoload=True )
try:
ToolShedRepository_table.c.status.drop()
except Exception, e:
print "Dropping column status from the tool_shed_repository table failed: %s" % str( e )
try:
ToolShedRepository_table.c.error_message.drop()
except Exception, e:
print "Dropping column error_message from the tool_shed_repository table failed: %s" % str( e )
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/model/migrate/versions/0103_add_tool_shed_repository_status_columns.py
|
Python
|
gpl-3.0
| 2,843
|
[
"Galaxy"
] |
29da31a94acc0aa91f95545495e86de7417a2b42ed652f70d77826ef91954801
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RIranges(RPackage):
"""Provides efficient low-level and highly
reusable S4 classes for storing,
manipulating and aggregating over annotated ranges of
integers. Implements an
algebra of range operations, including efficient
algorithms for finding overlaps
and nearest neighbors. Defines efficient list-like
classes for storing, transforming
and aggregating large grouped data,
i.e., collections of atomic vectors and DataFrames."""
homepage = "https://www.bioconductor.org/packages/IRanges/"
git = "https://git.bioconductor.org/packages/IRanges.git"
version('2.12.0', commit='1b1748655a8529ba87ad0f223f035ef0c08e7fcd')
version('2.10.5', commit='b00d1d5025e3c480d17c13100f0da5a0132b1614')
depends_on('r-biocgenerics@0.21.1:', type=('build', 'run'), when='@2.10.5')
depends_on('r-biocgenerics@0.23.3:', type=('build', 'run'), when='@2.12.0')
depends_on('r-s4vectors@0.13.17:', type=('build', 'run'), when='@2.10.5')
depends_on('r-s4vectors@0.15.5:', type=('build', 'run'), when='@2.12.0')
depends_on('r@3.4.0:3.4.9', when='@2.10.5:')
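# Hedged usage sketch: with this package file available in a Spack repo,
# the library would typically be built and made visible to R with
# something like
#
#     spack install r-iranges@2.12.0
#     spack load r-iranges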
|
krafczyk/spack
|
var/spack/repos/builtin/packages/r-iranges/package.py
|
Python
|
lgpl-2.1
| 2,378
|
[
"Bioconductor"
] |
7afea282c4717802d0cf183a36ee64417b8f2e59460f1d0b2e937fde72e03f84
|
# Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Some Bio.PDB-specific exceptions."""
# General error
class PDBException(Exception):
pass
# The PDB file cannot be unambiguously represented in the SMCRA
# data structure
class PDBConstructionException(Exception):
pass
class PDBConstructionWarning(Warning):
pass
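# Minimal illustration (hypothetical caller, not part of Biopython itself).
# Note that PDBConstructionException derives from Exception directly, not
# from PDBException, so the two hierarchies must be caught separately:
#
#     import warnings
#     try:
#         raise PDBConstructionException("could not build residue")
#     except PDBConstructionException as err:
#         warnings.warn(str(err), PDBConstructionWarning)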
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/PDB/PDBExceptions.py
|
Python
|
gpl-2.0
| 513
|
[
"Biopython"
] |
30ea8fed8beafe36eb21d7b7cb649683864ef99f880a9ad3727717f27495dde1
|
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from BrowserFactory import BrowserFactory
if __name__ == '__main__':
# 1 new factory object
factory = BrowserFactory()
# 2 create object
browser = factory.create('BrowserUrllib2')
# browser = factory.create('BrowserPhantomjs')
# browser = factory.create('BrowserRemote', host='localhost', port=55055)
# 3 operate
html = browser.visit('https://www.baidu.com')
if html:
print html
|
xtuyaowu/jtyd_python_spider
|
browser_interface/browser/demo.py
|
Python
|
mit
| 510
|
[
"VisIt"
] |
d86731234255bf37473cdc89087409164657e3fef6dd1b21eab51ddcda3561c8
|
# proxy module
from __future__ import absolute_import
from mayavi.modules.text3d import *
|
enthought/etsproxy
|
enthought/mayavi/modules/text3d.py
|
Python
|
bsd-3-clause
| 90
|
[
"Mayavi"
] |
34a7f41e76d3d4ccbc3fa37b2712087e86281dae1edc2d46a3de102db3a22ee4
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
from stoqlib.domain.person import Supplier
from stoqlib.domain.product import Storable
from stoqlib.domain.purchase import PurchaseOrder
from stoqlib.gui.test.uitestutils import GUITest
from stoqlib.gui.wizards.purchasewizard import PurchaseWizard, FinishPurchaseStep
from stoqlib.lib.dateutils import localdate
from stoqlib.lib.parameters import sysparam
class TestFinishPurchaseStep(GUITest):
def test_post_init(self):
purchase_order = self.create_purchase_order()
receiving_order = self.create_receiving_order(
purchase_order=purchase_order)
self.create_receiving_order_item(
receiving_order=receiving_order)
wizard = PurchaseWizard(store=self.store)
finish_step = FinishPurchaseStep(model=wizard.model,
store=self.store,
wizard=wizard)
sellable = self.create_sellable()
purchase_item = purchase_order.add_item(sellable=sellable)
receiving_order.add_purchase_item(purchase_item)
finish_step.post_init()
class TestPurchaseWizard(GUITest):
def _check_start_step(self, uitest='', identifier="12345"):
start_step = self.wizard.get_current_step()
start_step.identifier.update(identifier)
if uitest:
self.check_wizard(self.wizard, uitest)
self.click(self.wizard.next_button)
def _check_item_step(self, uitest=''):
item_step = self.wizard.get_current_step()
product = self.create_product()
Storable(product=product, store=self.store)
item_step.sellable_selected(product.sellable)
self.click(item_step.add_sellable_button)
if uitest:
self.check_wizard(self.wizard, uitest)
self.click(self.wizard.next_button)
def _check_payment_step(self, uitest=''):
if uitest:
self.check_wizard(self.wizard, uitest)
self.click(self.wizard.next_button)
def test_create(self):
# Allow creating purchases in the past.
sysparam.set_bool(self.store, 'ALLOW_OUTDATED_OPERATIONS', True)
with self.sysparam(MANDATORY_CHECK_NUMBER=True):
self.wizard = PurchaseWizard(self.store)
purchase_branch = self.create_branch()
purchase_order = PurchaseOrder(branch=purchase_branch)
sellable = self.create_sellable()
purchase_order.add_item(sellable=sellable)
self.wizard.model.identifier = 12345
self.wizard.model.open_date = localdate(2010, 1, 3).date()
self._check_start_step('wizard-purchase-start-step')
self._check_item_step('wizard-purchase-item-step')
payment_step = self.wizard.get_current_step()
payment_step.slave.bank_first_check_number.set_text('12')
self._check_payment_step('wizard-purchase-payment-step')
purchase = self.wizard.model
models = [purchase]
models.extend(purchase.get_items())
models.extend(purchase.payments)
models.append(purchase.group)
self.check_wizard(self.wizard, 'wizard-purchase-finish-step',
models=models)
self.click(self.wizard.next_button)
def test_create_without_active_supplier(self):
# Inactivating all the suppliers, so they wont show on PurchaseWizard
suppliers = self.store.find(Supplier)
for supplier in suppliers:
supplier.status = Supplier.STATUS_INACTIVE
wizard = PurchaseWizard(self.store)
step = wizard.get_current_step()
self.assertEquals(step.edit_supplier.get_sensitive(), False)
step.supplier.set_text('Invalid supplier')
self.assertEquals(step.edit_supplier.get_sensitive(), False)
# Activating the suppliers back
for supplier in suppliers:
supplier.status = Supplier.STATUS_ACTIVE
def test_edit_purchase_without_open_date(self):
purchase_order = self.create_purchase_order()
self.create_purchase_order_item(purchase_order)
purchase_order.status = PurchaseOrder.ORDER_PENDING
self.wizard = PurchaseWizard(self.store, purchase_order)
start_step = self.wizard.get_current_step()
start_step.open_date.update(None)
self.assertEquals(start_step.open_date.mandatory, True)
self.assertNotSensitive(self.wizard, ['next_button'])
def test_create_and_receive(self):
with self.sysparam(MANDATORY_CHECK_NUMBER=True):
self.wizard = PurchaseWizard(self.store)
self.wizard.model.identifier = 12345
self.wizard.model.open_date = localdate(2010, 1, 3).date()
self._check_start_step()
self._check_item_step()
payment_step = self.wizard.get_current_step()
payment_step.slave.bank_first_check_number.set_text('12')
self._check_payment_step()
finish_step = self.wizard.get_current_step()
finish_step.receive_now.set_active(True)
self.wizard.model.expected_receival_date = localdate(2010, 1, 4).date()
self.wizard.enable_next()
self.click(self.wizard.next_button)
receiving_step = self.wizard.get_current_step()
receiving_step.invoice_slave.identifier.set_text("12345")
receiving_step.invoice_slave.invoice_number.update(67890)
self.check_wizard(self.wizard, 'wizard-purchase-invoice-step')
self.click(self.wizard.next_button)
purchase = self.wizard.model
models = [purchase]
models.extend(purchase.get_items())
models.extend(purchase.payments)
models.append(purchase.group)
receive = self.wizard.receiving_model
models.append(receive)
models.extend(receive.get_items())
for item in receive.get_items():
models.extend(
list(item.sellable.product_storable.get_stock_items()))
self.check_wizard(self.wizard, 'wizard-purchase-done-received',
models=models)
def test_no_receive_now_for_batch_items(self):
with self.sysparam(MANDATORY_CHECK_NUMBER=True):
sellable = self.create_sellable()
product = self.create_product()
storable = self.create_storable(is_batch=True)
storable.product = product
sellable.product = product
wizard = PurchaseWizard(self.store)
self.click(wizard.next_button)
step = wizard.get_current_step()
step.sellable_selected(sellable)
self.click(step.add_sellable_button)
self.click(wizard.next_button)
payment_step = wizard.get_current_step()
payment_step.slave.bank_first_check_number.set_text('12')
self.click(wizard.next_button)
step = wizard.get_current_step()
self.assertNotVisible(step, ['receive_now'])
|
andrebellafronte/stoq
|
stoqlib/gui/test/test_purchase_wizard.py
|
Python
|
gpl-2.0
| 7,958
|
[
"VisIt"
] |
3739cd7b718c4ac78f91fd652b9cfdb5fb74392cdc2f88001e3d38500e102b96
|
import gflags
import httplib2
import re, sys, datetime
days = ["Mon", "Tue","Wed","Thu","Fri"]
## GCAL SETUP ##
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
from dateutil.relativedelta import relativedelta
FLAGS = gflags.FLAGS
# Set up a Flow object to be used if we need to authenticate. This
# sample uses OAuth 2.0, and we set up the OAuth2WebServerFlow with
# the information it needs to authenticate. Note that it is called
# the Web Server Flow, but it can also handle the flow for native
# applications
# The client_id and client_secret can be found in Google Developers Console
FLOW = OAuth2WebServerFlow(
client_id='488425336833-1t9aredo5teef895153h37qpjavkth1g.apps.googleusercontent.com',
client_secret='5naasdZs5Bc7S9bjhWg8JezD',
scope='https://www.googleapis.com/auth/calendar',
user_agent='MancUniTimetableScanner/v0.1a')
# To disable the local server feature, uncomment the following line:
# FLAGS.auth_local_webserver = False
# If the Credentials don't exist or are invalid, run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage('calendar.dat')
credentials = storage.get()
if credentials is None or credentials.invalid == True:
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
# Build a service object for interacting with the API. Visit
# the Google Developers Console
# to get a developerKey for your own application.
service = build(serviceName='calendar', version='v3', http=http,
developerKey='YOUR_DEVELOPER_KEY')
##END GCAL
##SCAN TEXT FILE
##Get what we're starting at
print("(be nice, you can break this easily)")
day = raw_input("What will the date be this coming monday? (dd/mm/yyyy): ").split("/")
week = int(raw_input("And what uni week number is that? "))
d = int(day[0])
m = int(day[1])
y = int(day[2])
class event():
def __init__(self, dayNum, Etime, name, location, weeks):
self.dayNum = dayNum
self.time = Etime.replace(" ", "").split("-")
self.location = location
self.weeks = weeks[5:].split(",")
self.name = name
self.weekArr = []
for i in self.weeks:
if "-" not in i:
self.weekArr.append(int(i))
else:
for j in range(int(i.split("-")[0]),int(i.split("-")[1].replace(" ",""))+1):
if j >= week:
self.weekArr.append(j)
def getName(self):
return self.name
def getTime(self):
return self.time
def getLocation(self):
return self.location
def getDates(self):
a = []
for i in self.weekArr:
a.append(
datetime.datetime(y,m,d) + relativedelta(days=self.dayNum+((i-week)*7))
)
return a
def getStart(self):
return self.time[0]
def getEnd(self):
return self.time[1]
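    # Worked example (assumed inputs): with the Monday date 01/09/2014 and
    # week = 1, event(2, "10:00 - 11:00", "Lab", "IT407", "Wks: 1-3").getDates()
    # yields the Wednesdays 2014-09-03, 2014-09-10 and 2014-09-17, i.e. dayNum
    # days plus (i - week) * 7 days after the given Monday for each listed week i.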
try:
toOpen = sys.argv[1]
except Exception:
print "File not accessible"
sys.exit(1)
f = open(toOpen)
timetable = f.read().split("\n")
f.close()
events = []
##REGEX
dayReg = re.compile("[A-Z][a-z]*day")
timeReg = re.compile("[0-9]*:[0-9]* - [0-9]*:[0-9]*")
weeksReg = re.compile("Wks: [A-Z][a-z][0-9]*")
print timetable
for i in range(len(timetable)):
if dayReg.match(timetable[i]):
j = i+1
thisDay = days.index(timetable[i][:3])
try:
while not dayReg.match(timetable[j]):
if timeReg.match(timetable[j]):
if "tutorial" in timetable[j+1].lower():
timetable.insert(j+1, "Tutor")
newEv = event(thisDay, timetable[j],
timetable[j+1]+" " + timetable[j+4].strip(),
timetable[j+3].strip(),
timetable[j+5].strip()
)
events.append(newEv)
j += 1
except Exception as ex:
print timetable[j:j+5]
print ex
break
print str(len(events)) + " being added to calendar... "
def notIn(event, arr):
for i in arr:
if event['summary'] == i['summary'] and event['location'] == i['location']:
if event['start'] == i['start'] and event['end'] == i['end']:
return False
return True
toAdd = []
for i in events:
for j in i.getDates():
event = {
'summary': i.getName(),
'location': i.getLocation(),
'start': {
'dateTime': j.isoformat().split("T")[0] +'T'+i.getStart()+':00.000-00:00',
},
'end': {
'dateTime': j.isoformat().split("T")[0]+'T'+i.getEnd()+':00.000-00:00',
}
}
#print event
if notIn(event, toAdd):
toAdd.append(event)
for i in toAdd:
event = service.events().insert(calendarId='primary', body=i).execute()
|
FloatingGhost/GCalTimetable
|
Scanner.py
|
Python
|
gpl-2.0
| 5,203
|
[
"VisIt"
] |
b4f635c6fc6ebfcdfbdae7692f7d96987c7d7505e6cf67e5118cdcdc31fc5d7a
|
class Neuron:
outSynapse = list()
|
Nakou/MARVIN
|
neuronal/Neuron.py
|
Python
|
mit
| 43
|
[
"NEURON"
] |
ec67291c131593d15f911259969b98162021616cba636543904c57a985bed852
|
import os
import os.path
import sys
sys.path.insert(0, os.path.abspath('lib'))
from ansible.release import __version__, __author__
try:
from setuptools import setup, find_packages
except ImportError:
print("Ansible now needs setuptools in order to build. Install it using"
" your package manager (usually python-setuptools) or via pip (pip"
" install setuptools).")
sys.exit(1)
with open('requirements.txt') as requirements_file:
install_requirements = requirements_file.read().splitlines()
if not install_requirements:
print("Unable to read requirements from the requirements.txt file"
"That indicates this copy of the source code is incomplete.")
sys.exit(2)
# pycrypto or cryptography. We choose a default but allow the user to
# override it. This translates into pip install of the sdist deciding what
# package to install and also the runtime dependencies that pkg_resources
# knows about
crypto_backend = os.environ.get('ANSIBLE_CRYPTO_BACKEND', None)
if crypto_backend:
install_requirements = [r for r in install_requirements if not (r.lower().startswith('pycrypto') or r.lower().startswith('cryptography'))]
install_requirements.append(crypto_backend)
SYMLINKS = {'ansible': frozenset(('ansible-console',
'ansible-doc',
'ansible-galaxy',
'ansible-playbook',
'ansible-pull',
'ansible-vault'))}
for source in SYMLINKS:
for dest in SYMLINKS[source]:
dest_path = os.path.join('bin', dest)
if not os.path.islink(dest_path):
try:
os.unlink(dest_path)
except OSError as e:
if e.errno == 2:
# File does not exist which is all we wanted
pass
os.symlink(source, dest_path)
setup(
name='ansible',
version=__version__,
description='Radically simple IT automation',
author=__author__,
author_email='info@ansible.com',
url='https://ansible.com/',
license='GPLv3',
# Ansible will also make use of a system copy of python-six and
# python-selectors2 if installed but use a Bundled copy if it's not.
install_requires=install_requirements,
package_dir={'': 'lib'},
packages=find_packages('lib'),
package_data={
'': [
'module_utils/*.ps1',
'modules/windows/*.ps1',
'galaxy/data/*/*.*',
'galaxy/data/*/*/.*',
'galaxy/data/*/*/*.*',
'galaxy/data/*/tests/inventory'
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Natural Language :: English',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
scripts=[
'bin/ansible',
'bin/ansible-playbook',
'bin/ansible-pull',
'bin/ansible-doc',
'bin/ansible-galaxy',
'bin/ansible-console',
'bin/ansible-connection',
'bin/ansible-vault',
],
data_files=[],
)
|
tux-00/ansible
|
setup.py
|
Python
|
gpl-3.0
| 3,659
|
[
"Galaxy"
] |
c5651fc3d5a9a8eb5d7a430e04f88c8d61b479411f6378534bffa5de2289df60
|
"""
Description:
Provides a pyGtk vtkRenderWindowInteractor widget. This embeds a
vtkRenderWindow inside a GTK widget and uses the
vtkGenericRenderWindowInteractor for the event handling. This is
based on vtkTkRenderWindow.py.
The class uses the gtkgl.GtkGLArea widget (gtkglarea). This avoids
a lot of problems with flicker.
There is a working example at the bottom.
Created by Prabhu Ramachandran, April 2002.
Bugs:
(*) There is a focus-related problem. Tkinter has a focus object
that handles focus events. I don't know of an equivalent object
under GTK. So, when an 'enter_notify_event' is received on the
GtkVTKRenderWindow I grab the focus, but I don't know what to do when
I get a 'leave_notify_event'.
(*) Will not work under Win32 because it uses the XID of a window in
OnRealize. Suggestions to fix this will be appreciated.
"""
import gtk, GDK, gtkgl
import vtk
import math
class GtkVTKRenderWindowInteractor(gtkgl.GtkGLArea):
""" Embeds a vtkRenderWindow into a pyGTK widget and uses
vtkGenericRenderWindowInteractor for the event handling. This
class embeds the RenderWindow correctly. A __getattr__ hook is
provided that makes the class behave like a
vtkGenericRenderWindowInteractor."""
def __init__(self, *args):
l = list(args)
attr = (gtkgl.RGBA, gtkgl.DOUBLEBUFFER)
l.insert(0, self)
l.insert(1, attr)
apply(gtkgl.GtkGLArea.__init__, l)
self._RenderWindow = vtk.vtkRenderWindow()
# private attributes
self.__Created = 0
self._ActiveButton = 0
self._Iren = vtk.vtkGenericRenderWindowInteractor()
self._Iren.SetRenderWindow(self._RenderWindow)
self._Iren.AddObserver('CreateTimerEvent', self.CreateTimer)
self._Iren.AddObserver('DestroyTimerEvent', self.DestroyTimer)
self.ConnectSignals()
# need this to be able to handle key_press events.
self.set_flags(gtk.CAN_FOCUS)
# default size
self.set_usize(300, 300)
def set_usize(self, w, h):
gtkgl.GtkGLArea.set_usize(self, w, h)
self._RenderWindow.SetSize(w, h)
self._Iren.SetSize(w, h)
self._Iren.ConfigureEvent()
def ConnectSignals(self):
self.connect("realize", self.OnRealize)
self.connect("expose_event", self.OnExpose)
self.connect("configure_event", self.OnConfigure)
self.connect("button_press_event", self.OnButtonDown)
self.connect("button_release_event", self.OnButtonUp)
self.connect("motion_notify_event", self.OnMouseMove)
self.connect("enter_notify_event", self.OnEnter)
self.connect("leave_notify_event", self.OnLeave)
self.connect("key_press_event", self.OnKeyPress)
self.connect("delete_event", self.OnDestroy)
self.add_events(GDK.EXPOSURE_MASK| GDK.BUTTON_PRESS_MASK |
GDK.BUTTON_RELEASE_MASK |
GDK.KEY_PRESS_MASK |
GDK.POINTER_MOTION_MASK |
GDK.POINTER_MOTION_HINT_MASK |
GDK.ENTER_NOTIFY_MASK | GDK.LEAVE_NOTIFY_MASK)
def __getattr__(self, attr):
"""Makes the object behave like a
vtkGenericRenderWindowInteractor"""
if attr == '__vtk__':
return lambda t=self._Iren: t
elif hasattr(self._Iren, attr):
return getattr(self._Iren, attr)
else:
raise AttributeError, self.__class__.__name__ + \
" has no attribute named " + attr
def CreateTimer(self, obj, event):
gtk.timeout_add(10, self._Iren.TimerEvent)
def DestroyTimer(self, obj, event):
"""The timer is a one shot timer so will expire automatically."""
return 1
def GetRenderWindow(self):
return self._RenderWindow
def Render(self):
if self.__Created:
self._RenderWindow.Render()
def OnRealize(self, *args):
if self.__Created == 0:
# you can't get the xid without the window being realized.
self.realize()
win_id = str(self.get_window().xid)
self._RenderWindow.SetWindowInfo(win_id)
self._Iren.Initialize()
self.__Created = 1
return gtk.TRUE
def OnConfigure(self, wid, event=None):
sz = self._RenderWindow.GetSize()
if (event.width != sz[0]) or (event.height != sz[1]):
self._Iren.SetSize(event.width, event.height)
self._Iren.ConfigureEvent()
return gtk.TRUE
def OnExpose(self, *args):
self.Render()
return gtk.TRUE
def OnDestroy(self, event=None):
self.hide()
del self._RenderWindow
self.destroy()
return gtk.TRUE
def _GetCtrlShift(self, event):
ctrl, shift = 0, 0
if ((event.state & GDK.CONTROL_MASK) == GDK.CONTROL_MASK):
ctrl = 1
if ((event.state & GDK.SHIFT_MASK) == GDK.SHIFT_MASK):
shift = 1
return ctrl, shift
def OnButtonDown(self, wid, event):
"""Mouse button pressed."""
m = self.get_pointer()
ctrl, shift = self._GetCtrlShift(event)
self._Iren.SetEventInformationFlipY(m[0], m[1], ctrl, shift,
chr(0), 0, None)
button = event.button
if button == 3:
self._Iren.RightButtonPressEvent()
return gtk.TRUE
elif button == 1:
self._Iren.LeftButtonPressEvent()
return gtk.TRUE
elif button == 2:
self._Iren.MiddleButtonPressEvent()
return gtk.TRUE
else:
return gtk.FALSE
def OnButtonUp(self, wid, event):
"""Mouse button released."""
m = self.get_pointer()
ctrl, shift = self._GetCtrlShift(event)
self._Iren.SetEventInformationFlipY(m[0], m[1], ctrl, shift,
chr(0), 0, None)
button = event.button
if button == 3:
self._Iren.RightButtonReleaseEvent()
return gtk.TRUE
elif button == 1:
self._Iren.LeftButtonReleaseEvent()
return gtk.TRUE
elif button == 2:
self._Iren.MiddleButtonReleaseEvent()
return gtk.TRUE
return gtk.FALSE
def OnMouseMove(self, wid, event):
"""Mouse has moved."""
m = self.get_pointer()
ctrl, shift = self._GetCtrlShift(event)
self._Iren.SetEventInformationFlipY(m[0], m[1], ctrl, shift,
chr(0), 0, None)
self._Iren.MouseMoveEvent()
return gtk.TRUE
def OnEnter(self, wid, event):
"""Entering the vtkRenderWindow."""
self.grab_focus()
m = self.get_pointer()
ctrl, shift = self._GetCtrlShift(event)
self._Iren.SetEventInformationFlipY(m[0], m[1], ctrl, shift,
chr(0), 0, None)
self._Iren.EnterEvent()
return gtk.TRUE
def OnLeave(self, wid, event):
"""Leaving the vtkRenderWindow."""
m = self.get_pointer()
ctrl, shift = self._GetCtrlShift(event)
self._Iren.SetEventInformationFlipY(m[0], m[1], ctrl, shift,
chr(0), 0, None)
self._Iren.LeaveEvent()
return gtk.TRUE
def OnKeyPress(self, wid, event):
"""Key pressed."""
m = self.get_pointer()
ctrl, shift = self._GetCtrlShift(event)
keycode, keysym = event.keyval, event.string
key = chr(0)
if keycode < 256:
key = chr(keycode)
self._Iren.SetEventInformationFlipY(m[0], m[1], ctrl, shift,
key, 0, keysym)
self._Iren.KeyPressEvent()
self._Iren.CharEvent()
return gtk.TRUE
def OnKeyRelease(self, wid, event):
"Key released."
m = self.get_pointer()
ctrl, shift = self._GetCtrlShift(event)
keycode, keysym = event.keyval, event.string
key = chr(0)
if keycode < 256:
key = chr(keycode)
self._Iren.SetEventInformationFlipY(m[0], m[1], ctrl, shift,
key, 0, keysym)
self._Iren.KeyReleaseEvent()
return gtk.TRUE
def Initialize(self):
if self.__Created:
self._Iren.Initialize()
def main():
# The main window
window = gtk.GtkWindow(gtk.WINDOW_TOPLEVEL)
window.set_title("A GtkVTKRenderWindow Demo!")
window.connect("destroy", gtk.mainquit)
window.connect("delete_event", gtk.mainquit)
window.set_border_width(10)
# A VBox into which widgets are packed.
vbox = gtk.GtkVBox(spacing=3)
window.add(vbox)
vbox.show()
# The GtkVTKRenderWindow
gvtk = GtkVTKRenderWindowInteractor()
#gvtk.SetDesiredUpdateRate(1000)
gvtk.set_usize(400, 400)
vbox.pack_start(gvtk)
gvtk.show()
gvtk.Initialize()
gvtk.Start()
# prevents 'q' from exiting the app.
gvtk.AddObserver("ExitEvent", lambda o,e,x=None: x)
# The VTK stuff.
cone = vtk.vtkConeSource()
cone.SetResolution(80)
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())
#coneActor = vtk.vtkLODActor()
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
coneActor.GetProperty().SetColor(0.5, 0.5, 1.0)
ren = vtk.vtkRenderer()
gvtk.GetRenderWindow().AddRenderer(ren)
ren.AddActor(coneActor)
# A simple quit button
quit = gtk.GtkButton("Quit!")
quit.connect("clicked", gtk.mainquit)
vbox.pack_start(quit)
quit.show()
# show the main window and start event processing.
window.show()
gtk.mainloop()
if __name__ == "__main__":
main()
|
timkrentz/SunTracker
|
IMU/VTK-6.2.0/Wrapping/Python/vtk/gtk/GtkVTKRenderWindowInteractor.py
|
Python
|
mit
| 10,171
|
[
"VTK"
] |
d75b508588bf8460f82582c8e69b05ed1fb5c9e30f50b2ff459b88692d161e53
|
"""A module that contains a base class that has helper methods for testing PyT."""
import unittest
from pyt.cfg import make_cfg
from pyt.core.ast_helper import generate_ast
from pyt.core.module_definitions import project_definitions
from pyt.core.transformer import PytTransformer
class BaseTestCase(unittest.TestCase):
"""A base class that has helper methods for testing PyT."""
def assert_length(self, _list, *, expected_length, msg=None):
actual_length = len(_list)
self.assertEqual(expected_length, actual_length, msg=msg)
def cfg_create_from_file(
self,
filename,
project_modules=list(),
local_modules=list()
):
project_definitions.clear()
tree = generate_ast(filename)
self.cfg = make_cfg(
tree,
project_modules,
local_modules,
filename
)
def cfg_create_from_ast(
self,
ast_tree,
project_modules=list(),
local_modules=list()
):
project_definitions.clear()
self.cfg = make_cfg(
PytTransformer().visit(ast_tree),
project_modules,
local_modules,
filename='?'
)
|
python-security/pyt
|
tests/base_test_case.py
|
Python
|
gpl-2.0
| 1,226
|
[
"VisIt"
] |
e99870b74e5e627a2189777e3a7d6a1691842f8e73ec924d0be986df2a84f5cc
|
# -*- coding: utf-8 -*-
"""
@file bible/api.py
@author Brian Kim
@brief the definition of routes for the api
"""
from flask import Blueprint, request, abort
import controller, methods, model
from util import *
import traceback as tb
api = Blueprint('api',__name__)
parse_dispatch = {
'lang' : controller.parse_lang,
'version' : controller.parse_version,
'book' : controller.parse_book,
'verse' : controller.parse_verse
}
post_dispatch = {
'lang' : controller.add_lang,
'version' : controller.add_version,
'book' : controller.add_book,
'verse' : controller.add_verse
}
@api.route('/', methods=['GET','POST'])
def api_root():
if request.method == 'GET':
return jsonify({'title':'Bible API','author':'BreadTech'})
elif request.method == 'POST':
# add a verse to the api
what = request.form.get('what')
x = parse_dispatch[what]()
y = post_dispatch[what](**x)
return jsonify(y)
dispatch = {
'lang': {'GET':controller.get_lang_all,'POST':controller.add_lang},
'version': {'GET':controller.get_version_all,'POST':controller.add_version},
'book': {'GET':controller.get_book_all,'POST':controller.add_book},
}
dispatch_id = {
'lang': {'GET':controller.get_lang,'PUT':controller.set_lang,'DELETE':controller.rm_lang},
'version': {'GET':controller.get_version,'PUT':controller.set_version,'DELETE':controller.rm_version},
'book': {'GET':controller.get_book,'PUT':controller.set_book,'DELETE':controller.rm_book},
}
@api.route('/<category>', methods=['GET','POST'])
def api_category(category):
method = dispatch.get(category)
if not method: abort(405)
try:
return method.get(request.method)()
except (AlreadyExistsException, BadDataException, NotFoundException) as e:
abort(e.code)
except Exception:
print(tb.format_exc())
abort(500)
clean_dispatch = {
'lang' : clean_lang_abbr,
'version' : clean_version_abbr,
'book' : clean_book_abbr
}
@api.route('/<category>/<name>', methods=['GET','PUT','DELETE','POST'])
def api_category_name(category,name):
# look up on the dispatch
method = dispatch_id.get(category)
clean_method = clean_dispatch.get(category)
if not method:
abort (405)
try:
abbr = clean_method(name)
return method.get(request.method)(abbr)
except (BadDataException, NotFoundException) as e:
abort(e.code)
except Exception:
print(tb.format_exc())
abort(500)
@api.route('/<version>/<book>/<int:chnum>', methods=['GET','POST'])
def get_chapter(version,book,chnum):
try:
v = version.upper()
b = clean_book_abbr(book)
if request.method == 'GET':
x = controller.get_chapter(v,b,chnum)
return x
elif request.method == 'POST':
x = controller.add_verse(v,b,chnum)
return x
except (NotFoundException,BadDataException,AlreadyExistsException) as e:
print(tb.format_exc())
abort(e.code)
except Exception:
print(tb.format_exc())
abort(500)
@api.route('/<version>/<book>/<int:chnum>/<int:verse>', methods=['GET','POST','PUT','DELETE'])
def handle_verse(version,book,chnum,verse):
try:
data = parse_verse_path(version,book,chnum,verse)
if request.method == 'GET':
return controller.get_verse(**data)
elif request.method == 'POST':
request.form = {'number':data['verse_a'],'text':request.form['text']}
return controller.add_verse(**data)
elif request.method == 'PUT':
return controller.set_verse(**data)
elif request.method == 'DELETE':
return controller.rm_verse(**data)
except (NotFoundException,BadDataException,AlreadyExistsException) as e:
abort(e.code)
except Exception:
print(tb.format_exc())
abort(500)
@api.route('/<version>/<book>/<ch_a>/<verse_a>/<verse_b>', methods=['GET'])
def handle_verse_range(version,book,ch_a,verse_a,verse_b):
return handle_verse_range_ch(version,book,ch_a,verse_a,ch_a,verse_b)
@api.route('/<version>/<book>/<ch_a>/<verse_a>/<ch_b>/<verse_b>', methods=['GET'])
def handle_verse_range_ch(version,book,ch_a,verse_a,ch_b,verse_b):
try:
data = parse_verse_path(version,book,ch_a,verse_a,ch_b,verse_b)
return controller.get_verse_range(**data)
except NotFoundException as e:
abort(e.code)
except Exception:
abort(500)
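# A minimal sketch of mounting this blueprint (application wiring assumed,
# not part of this module):
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.register_blueprint(api, url_prefix='/api')
#     # GET /api/lang then dispatches to api_category('lang')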
|
briansan/bible
|
bible/api.py
|
Python
|
bsd-2-clause
| 4,238
|
[
"Brian"
] |
0936bbf8da571de22b7f47310602cb120ead47b33ead77a76569c11eed2b5bba
|
import numpy as np
import ctypes as ct
import ast
from ctree.jit import LazySpecializedFunction, ConcreteSpecializedFunction
from ctree.transformations import PyBasicConversions
from ctree.transforms.declaration_filler import DeclarationFiller
from ctree.c.nodes import CFile
import ctree.c.nodes as C
from ctree.nodes import Project
from ctree.types import get_ctype
from ctree.templates.nodes import StringTemplate
def get_nd_pointer(arg):
return np.ctypeslib.ndpointer(arg.dtype, arg.ndim, arg.shape)
class HwachaFN(ConcreteSpecializedFunction):
def finalize(self, entry_point_name, project_node, entry_typesig):
self._c_function = self._compile(entry_point_name, project_node,
entry_typesig)
return self
def __call__(self, *args):
return self._c_function(*args)
class MapTransformer(ast.NodeTransformer):
def __init__(self, loopvar, param_dict, retval_name):
self.loopvar = loopvar
self.param_dict = param_dict
self.retval_name = retval_name
def visit_SymbolRef(self, node):
if node.name in self.param_dict:
return C.ArrayRef(node, C.SymbolRef(self.loopvar))
return node
def visit_Return(self, node):
node.value = self.visit(node.value)
return C.Assign(C.ArrayRef(C.SymbolRef(self.retval_name),
C.SymbolRef(self.loopvar)),
node.value)
hwacha_configure_block = """
size_t vector_length;
__asm__ volatile (
"vsetcfg 16, 1\\n"
"vsetvl %0, %1\\n"
: "=r"(vector_length)
: "r"({SIZE})
);
"""
bounds_check = """
if ({SIZE} == {loopvar}) continue;
"""
class ScalarFinder(ast.NodeVisitor):
def __init__(self, scalars):
self.scalars = scalars
def visit_Constant(self, node):
self.scalars.add(node.value)
def get_scalars_in_body(node):
scalars = set()
visitor = ScalarFinder(scalars)
for stmt in node.body:
visitor.visit(stmt)
return scalars
number_dict = {
"1": "one",
"2": "two",
"3": "three",
"4": "four",
"5": "five",
"6": "six",
"7": "seven",
"8": "eight",
"9": "nine",
"0": "zero",
".": "dot"
}
def scalar_init(scalar):
name = "".join(number_dict[digit] for digit in str(scalar))
return StringTemplate("""
union {{
float f;
uint32_t i;
}} {name};
{name}.f = {scalar}f;
""".format(name=name, scalar=scalar))
obtained_vector_length = """
size_t obtained_vector_length;
__asm__ volatile(
"vsetvl %0, %1\\n"
: "=r"(obtained_vector_length)
: "r"({SIZE} - {loopvar})
);
assert(obtained_vector_length <= {SIZE});
"""
class ArrayRefFinder(ast.NodeVisitor):
def __init__(self, refs):
self.refs = refs
def visit_BinaryOp(self, node):
if isinstance(node.op, C.Op.ArrayRef):
self.refs.append(node)
else:
self.visit(node.left)
self.visit(node.right)
def get_array_references_in_body(node):
refs = []
finder = ArrayRefFinder(refs)
for stmt in node.body:
finder.visit(stmt)
return refs
class HwachaASMTranslator(ast.NodeTransformer):
def __init__(self, scalars, ref_register_map, body, type_map):
self.scalars = scalars
self.ref_register_map = ref_register_map
self.body = body
self.curr_register = -1
self.reg_map = {}
self.type_map = type_map
def get_next_register(self):
self.curr_register += 1
return "vv{}".format(self.curr_register)
def visit_SymbolRef(self, node):
if node.name in self.reg_map:
return self.reg_map[node.name]
return node
def visit_Cast(self, node):
reg = self.get_next_register()
value = self.visit(node.value)
if isinstance(node.type, ct.c_float):
self.body.append(" vfcvt.s.w {0}, {1}\\n".format(reg, value))
self.type_map[reg] = ct.c_float
return reg
else:
raise NotImplementedError()
def visit_Constant(self, node):
self.type_map[node.value] = get_ctype(node.value)
return self.scalars[node.value]
def visit_FunctionCall(self, node):
if node.func.name == 'max':
arg1 = self.visit(node.args[0])
arg2 = self.visit(node.args[1])
reg = self.get_next_register()
print(node)
print(arg1)
if self.type_map[arg1] == ct.c_float or \
self.type_map[arg2] == ct.c_float:
self.body.append(" vfmax.s {0}, {1}, {2}\\n".format(
reg, arg1, arg2
))
self.type_map[reg] = ct.c_float
return reg
elif node.func.name == 'min':
arg1 = self.visit(node.args[0])
arg2 = self.visit(node.args[1])
reg = self.get_next_register()
if self.type_map[arg1] == ct.c_float or \
self.type_map[arg2] == ct.c_float:
self.body.append(" vfmin.s {0}, {1}, {2}\\n".format(
reg, arg1, arg2
))
self.type_map[reg] = ct.c_float
return reg
raise NotImplementedError()
def visit_BinaryOp(self, node):
if isinstance(node.op, C.Op.ArrayRef):
reg = self.get_next_register()
self.body.append(" vlwu {0}, {1}\\n".format(
reg,
self.ref_register_map[str(node)][1]))
return reg
if isinstance(node.op, C.Op.Assign):
node.right = self.visit(node.right)
if isinstance(node.left, C.SymbolRef):
self.reg_map[node.left.name] = node.right
return
elif isinstance(node.left, C.BinaryOp) and \
isinstance(node.left.op, C.Op.ArrayRef):
if self.type_map[node.left.left.name] != self.type_map[node.right]:
reg = self.get_next_register()
self.body.append(" vfcvt.w.s {0}, {1}\\n".format(reg, node.right))
self.body.append(" vsw {0}, {1}\\n".format(reg,
self.ref_register_map[str(node.left)][1]))
return
node.left = self.visit(node.left)
node.right = self.visit(node.right)
reg = self.get_next_register()
if isinstance(node.op, C.Op.Sub):
self.body.append(" vsub {0}, {1}, {2}\\n".format(
reg, node.left, node.right))
elif isinstance(node.op, C.Op.Div):
if self.type_map[node.left] == ct.c_float or \
self.type_map[node.right] == ct.c_float:
self.body.append(" vfdiv.s {0}, {1}, {2}\\n".format(
reg, node.left, node.right))
self.type_map[reg] = ct.c_float
else:
raise NotImplementedError()
elif isinstance(node.op, C.Op.Mul):
if self.type_map[node.left] == ct.c_float or \
self.type_map[node.right] == ct.c_float:
self.body.append(" vfmul.s {0}, {1}, {2}\\n".format(
reg, node.left, node.right))
self.type_map[reg] = ct.c_float
else:
raise NotImplementedError()
return reg
def get_asm_body(node, scalars, refs, type_map):
body = """
__asm__ volatile (
".align 3\\n"
"__hwacha_body:\\n"
"""
asm_body = []
translator = HwachaASMTranslator(scalars, refs, asm_body, type_map)
for s in node.body:
translator.visit(s)
for s in asm_body:
body += "\"" + s + "\"\n"
body += "\" vstop\\n\"\n"
body += " );"
return StringTemplate(body)
class HwachaVectorize(ast.NodeTransformer):
def __init__(self, type_map, defns):
self.type_map = type_map
self.defns = defns
def visit_For(self, node):
if node.pragma == "ivdep":
block = []
loopvar = node.incr.arg
size = node.test.right
scalars = get_scalars_in_body(node)
refs = get_array_references_in_body(node)
ref_register_map = {}
scalar_register_map = {}
for index, ref in enumerate(refs):
ref_register_map[str(ref)] = (ref, "va{}".format(index))
for index, scalar in enumerate(scalars):
reg = "vs{}".format(index)
scalar_register_map[scalar] = reg
self.type_map[reg] = get_ctype(scalar)
body = []
block.append(StringTemplate(hwacha_configure_block.format(SIZE=size)))
node.incr = C.AddAssign(loopvar, C.SymbolRef("vector_length"))
self.defns.append(get_asm_body(node, scalar_register_map,
ref_register_map, self.type_map))
block.append(node)
body.append(StringTemplate(bounds_check.format(SIZE=size,
loopvar=loopvar)))
for scalar in scalars:
body.append(scalar_init(scalar))
body.append(StringTemplate(obtained_vector_length.format(SIZE=size,
loopvar=loopvar)))
block1 = ""
block2 = ""
index = 0
for _, info in ref_register_map.items():
ref, register = info
block1 += "\t \"vmsa {0}, %{1}\\n\"\n".format(register, index)
block2 += "\"r\"({0} + {1}),\n".format(
ref.left.name, ref.right.name)
index += 1
for scalar, register in scalar_register_map.items():
block1 += "\t \"vmss {0}, %{1}\\n\"\n".format(register, index)
block2 += "\"r\"({0}.i),\n".format(
"".join(number_dict[digit] for digit in str(scalar)))
index += 1
block1 += "\"fence\\n\"\n"
block1 += "\"vf 0(%{0})\\n\"\n".format(index)
block2 += "\"r\" (&__hwacha_body)"
body.append(StringTemplate(
"""
__asm__ volatile(
{block1}
:
: {block2}
: "memory"
);
""".format(block1=block1, block2=block2)))
node.body = body
block.append(
StringTemplate("""
__asm__ volatile(
"fence\\n"
);
"""))
return block
class HwachaTranslator(LazySpecializedFunction):
def args_to_subconfig(self, args):
return tuple(get_nd_pointer(arg) for arg in args)
def transform(self, py_ast, program_cfg):
arg_cfg, tune_cfg = program_cfg
tree = PyBasicConversions().visit(py_ast)
param_dict = {}
tree.body[0].params.append(C.SymbolRef("retval", arg_cfg[0]()))
# Annotate arguments
for param, type in zip(tree.body[0].params, arg_cfg):
param.type = type()
param_dict[param.name] = type._dtype_
length = np.prod(arg_cfg[0]._shape_)
transformer = MapTransformer("i", param_dict, "retval")
body = list(map(transformer.visit, tree.body[0].defn))
tree.body[0].defn = [C.For(
C.Assign(C.SymbolRef("i", ct.c_int()), C.Constant(0)),
C.Lt(C.SymbolRef("i"), C.Constant(length)),
C.PostInc(C.SymbolRef("i")),
body=body,
pragma="ivdep"
)]
tree = DeclarationFiller().visit(tree)
defns = []
tree = HwachaVectorize(param_dict, defns).visit(tree)
file_body = [
StringTemplate("#include <stdlib.h>"),
StringTemplate("#include <stdint.h>"),
StringTemplate("#include <assert.h>"),
StringTemplate("extern \"C\" void __hwacha_body(void);"),
]
file_body.extend(defns)
file_body.append(tree)
return [CFile("generated", file_body)]
def finalize(self, transform_result, program_config):
generated = transform_result[0]
print(generated)
proj = Project([generated])
entry_type = ct.CFUNCTYPE(None, *program_config[0])
return HwachaFN().finalize("apply", proj, entry_type)
def hwacha_map(fn, *args):
mapfn = HwachaTranslator.from_function(fn, "map")
retval = np.empty_like(args[0])
args += (retval, )
mapfn(*args)
return retval
CALIBRATE_COLD = 0x7000
CALIBRATE_HOT = 0xA000
SIZE = (208 * 156)
# Generate a dummy calibration table, just so there's something
# to execute.
cold = np.full(SIZE, CALIBRATE_COLD, np.int32)
hot = np.full(SIZE, CALIBRATE_HOT, np.int32)
# Generate a dummy input image, again just so there's something
# to execute.
raw = np.empty(SIZE, np.int32)
for i in range(SIZE):
scale = (CALIBRATE_HOT - CALIBRATE_COLD)
percent = (i % 120) - 10
    raw[i] = scale * (percent / 100.0) + CALIBRATE_COLD
    # NOTE: the line below overwrites the value computed above; it keeps every
    # raw sample strictly inside the (cold, hot) calibration range.
    raw[i] = CALIBRATE_COLD + (i % (int)(scale - 2)) + 1
def gold(cold, hot, raw, flat):
for i in range(208 * 156):
_max = hot[i]
_min = cold[i]
offset = raw[i] - _min
scale = _max - _min
foffset = float(offset)
fscale = float(scale)
scaled = foffset / fscale
scaled = min(1.0, scaled)
scaled = max(0.0, scaled)
flat[i] = 255 * scaled
def test_map(cold, hot, raw):
_max = hot
_min = cold
offset = raw - _min
scale = _max - _min
foffset = float(offset)
fscale = float(scale)
scaled = foffset / fscale
scaled = min(1.0, scaled)
scaled = max(0.0, scaled)
return 255.0 * scaled
flat_gold = np.empty_like(raw)
gold(cold, hot, raw, flat_gold)
flat_test = hwacha_map(test_map, cold, hot, raw)
np.testing.assert_array_equal(flat_gold, flat_test)
|
ucb-sejits/ctree
|
examples/hwacha.py
|
Python
|
bsd-2-clause
| 13,736
|
[
"VisIt"
] |
d35b82e4cdf80764171dad80a699fb435ec1b1f28ec94e32c93429fc400545eb
|
#
# mainTab
#
tab = self.notebook.mainTab
tab.settings['Program'] = 'castep'
tab.settings['Output file name'] = 'phonon.castep'
#
# SettingsTab
#
tab = self.notebook.settingsTab
tab.settings['Eckart flag'] = False
tab.settings['Neutral Born charges'] = False
tab.settings['Sigma value'] = 10
tab.settings['Mass definition'] = 'average'
#
# 0th Scenario tabs
#
tab = self.notebook.scenarios[0]
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix permittivity'] = 3.0
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['Volume fraction'] = 0.1
tab.settings['Ellipsoid a/b'] = 0.5
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Effective medium method'] = 'Maxwell-Garnett'
tab.settings['Particle shape'] = 'Sphere'
tab.settings['Legend'] = 'Maxwell-Garnett sphere'
# Add new scenarios
methods = [ 'Maxwell-Garnett', 'Bruggeman']
shapes = ['Sphere', 'Needle']
vfs = [ 0.1, 0.2 ]
hkl = [0,0,1]
for method in methods:
for vf in vfs:
for shape in shapes:
self.notebook.addScenario()
tab = self.notebook.scenarios[-1]
tab.settings['Particle shape'] = shape
tab.settings['Effective medium method'] = method
tab.settings['Volume fraction'] = vf
tab.settings['Legend'] = method+' vf='+str(vf)+' '+shape
if shape != 'Sphere':
tab.settings['Legend'] = method+' vf='+str(vf)+' '+shape+' '+str(hkl)
tab.settings['Effective medium method'] = 'Averaged Permittivity'
tab.settings['Particle shape'] = 'Sphere'
for vf in vfs:
self.notebook.addScenario()
tab = self.notebook.scenarios[-1]
tab.settings['Volume fraction'] = vf
tab.settings['Legend'] = 'AP vf='+str(vf)
self.notebook.deleteScenario(0)
#
# Plotting Tab
#
tab = self.notebook.plottingTab
tab.settings['Minimum frequency'] = 300
tab.settings['Maximum frequency'] = 800
tab.settings['Frequency increment'] = 0.2
tab.settings['Molar definition'] = 'Unit cells'
tab.settings['Plot title'] = 'Castep MgO'
#
# Analysis Tab
#
tab = self.notebook.analysisTab
tab.settings['Minimum frequency'] = -1
tab.settings['Maximum frequency'] = 400
tab.settings['title'] = 'Analysis'
tab.settings['Covalent radius scaling'] = 1.1
tab.settings['Bonding tolerance'] = 0.1
tab.settings['Bar width'] = 0.5
#
|
JohnKendrick/PDielec
|
Examples/Castep/MgO/script.py
|
Python
|
mit
| 2,364
|
[
"CASTEP"
] |
57947a1a5d9381f0f8d2e1e2815b3fc83fc28e99c02a609b28c21571238f100d
|
from Sire.Tools import Nautilus
from Sire.Tools import readParams
import Sire.Config
import argparse
import os
import sys
parser = argparse.ArgumentParser(description="Subgrids, subtracts grids in dx format to get differences where gridf.dx-"
"gridl.dx=diff.dx. Grids must be of identical dimensions and grid density ",
epilog="nautilus-subgrids is built using Sire, Numpy and mdtraj and is distributed "
"under the GPL. For more information please visit "
"http://siremol.org/nautilus",
prog="nautilus")
parser.add_argument('--author', action="store_true",
help="Get information about the authors of this script.")
parser.add_argument('--version', action="store_true",
help="Get version information about this script.")
parser.add_argument('-gf', '--gridf', nargs="?",
help="Grid dx file f to be subtracted")
parser.add_argument('-gl', '--gridl', nargs="?",
help="Grid dx file l used to subtract")
parser.add_argument('-d', '--diffdx', nargs="?",
help="Name of difference dx file")
parser.add_argument('-b', '--benchmark', action='store_true',
help="Benchmark the Nautilus subroutines.")
sys.stdout.write("\n")
args = parser.parse_args()
must_exit = False
if args.author:
print("\n nautilus-subgrids was written by Georgios Gerogiokas and Julien Michel (C) 2014")
print("It is based on the Nautilus Sire module.")
must_exit = True
if args.version:
print("nautilus-subgrids -- from Sire release version <%s>" %Sire.__version__)
print("This particular release can be downloaded here: "
"https://github.com/michellab/Sire/releases/tag/v%s" %Sire.__version__)
must_exit = True
if must_exit:
sys.exit(0)
# If we have been given a CONFIG file, read it now
params = {}
if args.gridf:
gridf = args.gridf
params["gridf"] = gridf
elif "gridf" in params:
gridf = params["gridf"]
else:
gridf = "gridf.dx"
params["gridf"] = gridf
if args.gridl:
gridl = args.gridl
params["gridl"] = gridl
elif "gridl" in params:
gridl = params["gridl"]
else:
gridl = "gridl.dx"
params["gridl"] = gridl
if args.diffdx:
diffdx = args.diffdx
params["diffdx"] = diffdx
elif "diffdx" in params:
diffdx= params["diffdx"]
else:
diffdx = "diff.dx"
params["diffdx"] = diffdx
if args.benchmark:
params["benchmark"] = True
#print (params)
if not (os.path.exists(gridf) and os.path.exists(gridl)):
parser.print_help()
print("\nPlease supply the names of an dx files to be subtracted.")
if not os.path.exists(os.path.exists(gridf) and os.path.exists(gridl)):
print("(cannot find dx files %s %s)" % (gridf, gridl))
sys.exit(-1)
print("\nRunning nautilus-subgrids.py using files %s %s" % (gridf, gridl) )
Nautilus.subgrids(params)
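# A hypothetical invocation (file names are placeholders; the exact Python
# launcher depends on how Sire is installed):
#
#   python nautilus-subgrids.py -gf gridf.dx -gl gridl.dx -d diff.dx
#
# which writes gridf.dx - gridl.dx to diff.dx.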
|
michellab/Sire
|
wrapper/python/scripts/nautilus-subgrids.py
|
Python
|
gpl-2.0
| 3,048
|
[
"MDTraj",
"VisIt"
] |
89d9bf770dde947aaf1b14cc8f9be13f0e91b410224ee13ad6556abd935f5084
|
from director import objectmodel as om
from director import visualization as vis
from director import vtkAll as vtk
from director import transformUtils
from director import filterUtils
from director.timercallback import TimerCallback
class AffordanceGraspUpdater(object):
def __init__(self, robotModel, ikPlanner, extraModels=None):
self.robotModel = robotModel
self.ikPlanner = ikPlanner
self.frameSyncs = {}
self.attachedAffordances = {}
models = [robotModel]
if extraModels:
models.extend(extraModels)
for model in models:
model.connectModelChanged(self.onRobotModelChanged)
def onRobotModelChanged(self, model):
linkNames = []
for handModel in self.ikPlanner.handModels:
linkNames.append(handModel.handLinkName)
#linkNames = [self.ikPlanner.getHandLink('left') , self.ikPlanner.getHandLink('right')]
for linkName in linkNames:
self.updateLinkFrame(model, linkName, create=False)
def getAffordanceFrame(self, affordanceName):
frame = om.findObjectByName(affordanceName + ' frame')
assert frame
return frame
def updateLinkFrame(self, robotModel, linkName, create=True):
linkFrameName = '%s frame' % linkName
if not create and not om.findObjectByName(linkFrameName):
return
t = robotModel.getLinkFrame(linkName)
return vis.updateFrame(t, linkFrameName, scale=0.2, visible=False, parent=self.robotModel)
def hasAffordance(self, affordanceName):
return affordanceName in self.frameSyncs
def graspAffordance(self, affordanceName, side):
if affordanceName in self.frameSyncs:
return
affordanceFrame = self.getAffordanceFrame(affordanceName)
#linkName = 'l_hand' if side == 'left' else 'r_hand'
linkName = self.ikPlanner.getHandLink(side)
linkFrame = self.updateLinkFrame(self.robotModel, linkName)
frameSync = vis.FrameSync()
frameSync.addFrame(linkFrame)
frameSync.addFrame(affordanceFrame, ignoreIncoming=True)
self.frameSyncs[affordanceName] = frameSync
self.attachedAffordances[affordanceName] = linkName
def ungraspAffordance(self, affordanceName):
try:
del self.frameSyncs[affordanceName]
del self.attachedAffordances[affordanceName]
except KeyError:
pass
if not self.frameSyncs:
om.removeFromObjectModel(om.findObjectByName('l_hand frame'))
om.removeFromObjectModel(om.findObjectByName('r_hand frame'))
class AffordanceInCameraUpdater(object):
def __init__(self, affordanceManager, imageView):
self.affordanceManager = affordanceManager
self.prependImageName = False
self.projectFootsteps = False
self.projectAffordances = True
self.extraObjects = []
self.imageView = imageView
self.imageQueue = imageView.imageManager.queue
self.timer = TimerCallback(targetFps=10)
self.timer.callback = self.update
def getOverlayRenderer(self, imageView):
if not hasattr(imageView, 'overlayRenderer'):
renWin = imageView.view.renderWindow()
renWin.SetNumberOfLayers(2)
ren = vtk.vtkRenderer()
ren.SetLayer(1)
ren.SetActiveCamera(imageView.view.camera())
renWin.AddRenderer(ren)
imageView.overlayRenderer = ren
return imageView.overlayRenderer
def addActorToImageOverlay(self, obj, imageView):
obj.addToView(imageView.view)
imageView.view.renderer().RemoveActor(obj.actor)
renderers = obj.extraViewRenderers.setdefault(imageView.view, [])
overlayRenderer = self.getOverlayRenderer(imageView)
if overlayRenderer not in renderers:
overlayRenderer.AddActor(obj.actor)
renderers.append(overlayRenderer)
def getFolderName(self):
if self.prependImageName:
return self.imageView.imageName + ' camera overlay'
else:
return 'camera overlay'
def setupObjectInCamera(self, obj):
imageView = self.imageView
obj = vis.updatePolyData(vtk.vtkPolyData(), self.getTransformedName(obj), view=imageView.view, color=obj.getProperty('Color'), parent=self.getFolderName(), visible=obj.getProperty('Visible'))
self.addActorToImageOverlay(obj, imageView)
return obj
def getTransformedName(self, obj):
if self.prependImageName:
return 'overlay ' + self.imageView.imageName + ' ' + obj.getProperty('Name')
else:
return 'overlay ' + obj.getProperty('Name')
def getFootsteps(self):
plan = om.findObjectByName('footstep plan')
if plan:
return [child for child in plan.children() if child.getProperty('Name').startswith('step ')]
else:
return []
def getObjectsToUpdate(self):
objs = []
if self.projectAffordances:
objs += self.affordanceManager.getAffordances()
if self.projectFootsteps:
objs += self.getFootsteps()
objs += self.extraObjects
return objs
def getObjectInCamera(self, obj):
overlayObj = om.findObjectByName(self.getTransformedName(obj))
return overlayObj or self.setupObjectInCamera(obj)
def cleanUp(self):
self.timer.stop()
om.removeFromObjectModel(om.findObjectByName(self.getFolderName()))
def update(self):
imageView = self.imageView
if not imageView.imageInitialized:
return
if not imageView.view.isVisible():
return
updated = set()
for obj in self.getObjectsToUpdate():
cameraObj = self.getObjectInCamera(obj)
self.updateObjectInCamera(obj, cameraObj)
updated.add(cameraObj)
folder = om.findObjectByName(self.getFolderName())
if folder:
for child in folder.children():
if child not in updated:
om.removeFromObjectModel(child)
def updateObjectInCamera(self, obj, cameraObj):
imageView = self.imageView
objToLocalT = transformUtils.copyFrame(obj.actor.GetUserTransform() or vtk.vtkTransform())
localToCameraT = vtk.vtkTransform()
self.imageQueue.getTransform('local', imageView.imageName, localToCameraT)
t = vtk.vtkTransform()
t.PostMultiply()
t.Concatenate(objToLocalT)
t.Concatenate(localToCameraT)
pd = filterUtils.transformPolyData(obj.polyData, t)
'''
normals = pd.GetPointData().GetNormals()
cameraToImageT = vtk.vtkTransform()
imageQueue.getCameraProjectionTransform(imageView.imageName, cameraToImageT)
pd = filterUtils.transformPolyData(pd, cameraToImageT)
pts = vnp.getNumpyFromVtk(pd, 'Points')
pts[:,0] /= pts[:,2]
pts[:,1] /= pts[:,2]
pd.GetPointData().SetNormals(normals)
'''
self.imageQueue.projectPoints(imageView.imageName, pd)
cameraObj.setPolyData(pd)
self.addActorToImageOverlay(cameraObj, imageView)
|
patmarion/director
|
src/python/director/affordanceupdater.py
|
Python
|
bsd-3-clause
| 7,248
|
[
"VTK"
] |
95959fb18370be93b9d9908ce73f8508d49b33ad9eb5decf4e89d94394aa45b1
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# read data
#
reader = vtk.vtkStructuredGridReader()
reader.SetFileName(VTK_DATA_ROOT + "/Data/office.binary.vtk")
reader.Update()
length = reader.GetOutput().GetLength()
maxVelocity = reader.GetOutput().GetPointData().GetVectors().GetMaxNorm()
maxTime = 35.0 * length / maxVelocity
table1 = vtk.vtkStructuredGridGeometryFilter()
table1.SetInputConnection(reader.GetOutputPort())
table1.SetExtent(11, 15, 7, 9, 8, 8)
mapTable1 = vtk.vtkPolyDataMapper()
mapTable1.SetInputConnection(table1.GetOutputPort())
mapTable1.ScalarVisibilityOff()
table1Actor = vtk.vtkActor()
table1Actor.SetMapper(mapTable1)
table1Actor.GetProperty().SetColor(.59, .427, .392)
table2 = vtk.vtkStructuredGridGeometryFilter()
table2.SetInputConnection(reader.GetOutputPort())
table2.SetExtent(11, 15, 10, 12, 8, 8)
mapTable2 = vtk.vtkPolyDataMapper()
mapTable2.SetInputConnection(table2.GetOutputPort())
mapTable2.ScalarVisibilityOff()
table2Actor = vtk.vtkActor()
table2Actor.SetMapper(mapTable2)
table2Actor.GetProperty().SetColor(.59, .427, .392)
FilingCabinet1 = vtk.vtkStructuredGridGeometryFilter()
FilingCabinet1.SetInputConnection(reader.GetOutputPort())
FilingCabinet1.SetExtent(15, 15, 7, 9, 0, 8)
mapFilingCabinet1 = vtk.vtkPolyDataMapper()
mapFilingCabinet1.SetInputConnection(FilingCabinet1.GetOutputPort())
mapFilingCabinet1.ScalarVisibilityOff()
FilingCabinet1Actor = vtk.vtkActor()
FilingCabinet1Actor.SetMapper(mapFilingCabinet1)
FilingCabinet1Actor.GetProperty().SetColor(.8, .8, .6)
FilingCabinet2 = vtk.vtkStructuredGridGeometryFilter()
FilingCabinet2.SetInputConnection(reader.GetOutputPort())
FilingCabinet2.SetExtent(15, 15, 10, 12, 0, 8)
mapFilingCabinet2 = vtk.vtkPolyDataMapper()
mapFilingCabinet2.SetInputConnection(FilingCabinet2.GetOutputPort())
mapFilingCabinet2.ScalarVisibilityOff()
FilingCabinet2Actor = vtk.vtkActor()
FilingCabinet2Actor.SetMapper(mapFilingCabinet2)
FilingCabinet2Actor.GetProperty().SetColor(.8, .8, .6)
bookshelf1Top = vtk.vtkStructuredGridGeometryFilter()
bookshelf1Top.SetInputConnection(reader.GetOutputPort())
bookshelf1Top.SetExtent(13, 13, 0, 4, 0, 11)
mapBookshelf1Top = vtk.vtkPolyDataMapper()
mapBookshelf1Top.SetInputConnection(bookshelf1Top.GetOutputPort())
mapBookshelf1Top.ScalarVisibilityOff()
bookshelf1TopActor = vtk.vtkActor()
bookshelf1TopActor.SetMapper(mapBookshelf1Top)
bookshelf1TopActor.GetProperty().SetColor(.8, .8, .6)
bookshelf1Bottom = vtk.vtkStructuredGridGeometryFilter()
bookshelf1Bottom.SetInputConnection(reader.GetOutputPort())
bookshelf1Bottom.SetExtent(20, 20, 0, 4, 0, 11)
mapBookshelf1Bottom = vtk.vtkPolyDataMapper()
mapBookshelf1Bottom.SetInputConnection(bookshelf1Bottom.GetOutputPort())
mapBookshelf1Bottom.ScalarVisibilityOff()
bookshelf1BottomActor = vtk.vtkActor()
bookshelf1BottomActor.SetMapper(mapBookshelf1Bottom)
bookshelf1BottomActor.GetProperty().SetColor(.8, .8, .6)
bookshelf1Front = vtk.vtkStructuredGridGeometryFilter()
bookshelf1Front.SetInputConnection(reader.GetOutputPort())
bookshelf1Front.SetExtent(13, 20, 0, 0, 0, 11)
mapBookshelf1Front = vtk.vtkPolyDataMapper()
mapBookshelf1Front.SetInputConnection(bookshelf1Front.GetOutputPort())
mapBookshelf1Front.ScalarVisibilityOff()
bookshelf1FrontActor = vtk.vtkActor()
bookshelf1FrontActor.SetMapper(mapBookshelf1Front)
bookshelf1FrontActor.GetProperty().SetColor(.8, .8, .6)
bookshelf1Back = vtk.vtkStructuredGridGeometryFilter()
bookshelf1Back.SetInputConnection(reader.GetOutputPort())
bookshelf1Back.SetExtent(13, 20, 4, 4, 0, 11)
mapBookshelf1Back = vtk.vtkPolyDataMapper()
mapBookshelf1Back.SetInputConnection(bookshelf1Back.GetOutputPort())
mapBookshelf1Back.ScalarVisibilityOff()
bookshelf1BackActor = vtk.vtkActor()
bookshelf1BackActor.SetMapper(mapBookshelf1Back)
bookshelf1BackActor.GetProperty().SetColor(.8, .8, .6)
bookshelf1LHS = vtk.vtkStructuredGridGeometryFilter()
bookshelf1LHS.SetInputConnection(reader.GetOutputPort())
bookshelf1LHS.SetExtent(13, 20, 0, 4, 0, 0)
mapBookshelf1LHS = vtk.vtkPolyDataMapper()
mapBookshelf1LHS.SetInputConnection(bookshelf1LHS.GetOutputPort())
mapBookshelf1LHS.ScalarVisibilityOff()
bookshelf1LHSActor = vtk.vtkActor()
bookshelf1LHSActor.SetMapper(mapBookshelf1LHS)
bookshelf1LHSActor.GetProperty().SetColor(.8, .8, .6)
bookshelf1RHS = vtk.vtkStructuredGridGeometryFilter()
bookshelf1RHS.SetInputConnection(reader.GetOutputPort())
bookshelf1RHS.SetExtent(13, 20, 0, 4, 11, 11)
mapBookshelf1RHS = vtk.vtkPolyDataMapper()
mapBookshelf1RHS.SetInputConnection(bookshelf1RHS.GetOutputPort())
mapBookshelf1RHS.ScalarVisibilityOff()
bookshelf1RHSActor = vtk.vtkActor()
bookshelf1RHSActor.SetMapper(mapBookshelf1RHS)
bookshelf1RHSActor.GetProperty().SetColor(.8, .8, .6)
bookshelf2Top = vtk.vtkStructuredGridGeometryFilter()
bookshelf2Top.SetInputConnection(reader.GetOutputPort())
bookshelf2Top.SetExtent(13, 13, 15, 19, 0, 11)
mapBookshelf2Top = vtk.vtkPolyDataMapper()
mapBookshelf2Top.SetInputConnection(bookshelf2Top.GetOutputPort())
mapBookshelf2Top.ScalarVisibilityOff()
bookshelf2TopActor = vtk.vtkActor()
bookshelf2TopActor.SetMapper(mapBookshelf2Top)
bookshelf2TopActor.GetProperty().SetColor(.8, .8, .6)
bookshelf2Bottom = vtk.vtkStructuredGridGeometryFilter()
bookshelf2Bottom.SetInputConnection(reader.GetOutputPort())
bookshelf2Bottom.SetExtent(20, 20, 15, 19, 0, 11)
mapBookshelf2Bottom = vtk.vtkPolyDataMapper()
mapBookshelf2Bottom.SetInputConnection(bookshelf2Bottom.GetOutputPort())
mapBookshelf2Bottom.ScalarVisibilityOff()
bookshelf2BottomActor = vtk.vtkActor()
bookshelf2BottomActor.SetMapper(mapBookshelf2Bottom)
bookshelf2BottomActor.GetProperty().SetColor(.8, .8, .6)
bookshelf2Front = vtk.vtkStructuredGridGeometryFilter()
bookshelf2Front.SetInputConnection(reader.GetOutputPort())
bookshelf2Front.SetExtent(13, 20, 15, 15, 0, 11)
mapBookshelf2Front = vtk.vtkPolyDataMapper()
mapBookshelf2Front.SetInputConnection(bookshelf2Front.GetOutputPort())
mapBookshelf2Front.ScalarVisibilityOff()
bookshelf2FrontActor = vtk.vtkActor()
bookshelf2FrontActor.SetMapper(mapBookshelf2Front)
bookshelf2FrontActor.GetProperty().SetColor(.8, .8, .6)
bookshelf2Back = vtk.vtkStructuredGridGeometryFilter()
bookshelf2Back.SetInputConnection(reader.GetOutputPort())
bookshelf2Back.SetExtent(13, 20, 19, 19, 0, 11)
mapBookshelf2Back = vtk.vtkPolyDataMapper()
mapBookshelf2Back.SetInputConnection(bookshelf2Back.GetOutputPort())
mapBookshelf2Back.ScalarVisibilityOff()
bookshelf2BackActor = vtk.vtkActor()
bookshelf2BackActor.SetMapper(mapBookshelf2Back)
bookshelf2BackActor.GetProperty().SetColor(.8, .8, .6)
bookshelf2LHS = vtk.vtkStructuredGridGeometryFilter()
bookshelf2LHS.SetInputConnection(reader.GetOutputPort())
bookshelf2LHS.SetExtent(13, 20, 15, 19, 0, 0)
mapBookshelf2LHS = vtk.vtkPolyDataMapper()
mapBookshelf2LHS.SetInputConnection(bookshelf2LHS.GetOutputPort())
mapBookshelf2LHS.ScalarVisibilityOff()
bookshelf2LHSActor = vtk.vtkActor()
bookshelf2LHSActor.SetMapper(mapBookshelf2LHS)
bookshelf2LHSActor.GetProperty().SetColor(.8, .8, .6)
bookshelf2RHS = vtk.vtkStructuredGridGeometryFilter()
bookshelf2RHS.SetInputConnection(reader.GetOutputPort())
bookshelf2RHS.SetExtent(13, 20, 15, 19, 11, 11)
mapBookshelf2RHS = vtk.vtkPolyDataMapper()
mapBookshelf2RHS.SetInputConnection(bookshelf2RHS.GetOutputPort())
mapBookshelf2RHS.ScalarVisibilityOff()
bookshelf2RHSActor = vtk.vtkActor()
bookshelf2RHSActor.SetMapper(mapBookshelf2RHS)
bookshelf2RHSActor.GetProperty().SetColor(.8, .8, .6)
window = vtk.vtkStructuredGridGeometryFilter()
window.SetInputConnection(reader.GetOutputPort())
window.SetExtent(20, 20, 6, 13, 10, 13)
mapWindow = vtk.vtkPolyDataMapper()
mapWindow.SetInputConnection(window.GetOutputPort())
mapWindow.ScalarVisibilityOff()
windowActor = vtk.vtkActor()
windowActor.SetMapper(mapWindow)
windowActor.GetProperty().SetColor(.3, .3, .5)
outlet = vtk.vtkStructuredGridGeometryFilter()
outlet.SetInputConnection(reader.GetOutputPort())
outlet.SetExtent(0, 0, 9, 10, 14, 16)
mapOutlet = vtk.vtkPolyDataMapper()
mapOutlet.SetInputConnection(outlet.GetOutputPort())
mapOutlet.ScalarVisibilityOff()
outletActor = vtk.vtkActor()
outletActor.SetMapper(mapOutlet)
outletActor.GetProperty().SetColor(0, 0, 0)
inlet = vtk.vtkStructuredGridGeometryFilter()
inlet.SetInputConnection(reader.GetOutputPort())
inlet.SetExtent(0, 0, 9, 10, 0, 6)
mapInlet = vtk.vtkPolyDataMapper()
mapInlet.SetInputConnection(inlet.GetOutputPort())
mapInlet.ScalarVisibilityOff()
inletActor = vtk.vtkActor()
inletActor.SetMapper(mapInlet)
inletActor.GetProperty().SetColor(0, 0, 0)
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputConnection(reader.GetOutputPort())
mapOutline = vtk.vtkPolyDataMapper()
mapOutline.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(mapOutline)
outlineActor.GetProperty().SetColor(0, 0, 0)
# Create source for streamtubes
streamer = vtk.vtkStreamPoints()
streamer.SetInputConnection(reader.GetOutputPort())
streamer.SetStartPosition(0.1, 2.1, 0.5)
streamer.SetMaximumPropagationTime(500)
streamer.SetTimeIncrement(0.5)
streamer.SetIntegrationDirectionToForward()
cone = vtk.vtkConeSource()
cone.SetResolution(8)
cones = vtk.vtkGlyph3D()
cones.SetInputConnection(streamer.GetOutputPort())
cones.SetSourceConnection(cone.GetOutputPort())
cones.SetScaleFactor(0.5)
cones.SetScaleModeToScaleByVector()
mapCones = vtk.vtkPolyDataMapper()
mapCones.SetInputConnection(cones.GetOutputPort())
mapCones.SetScalarRange(reader.GetOutput().GetScalarRange())
conesActor = vtk.vtkActor()
conesActor.SetMapper(mapCones)
ren1.AddActor(table1Actor)
ren1.AddActor(table2Actor)
ren1.AddActor(FilingCabinet1Actor)
ren1.AddActor(FilingCabinet2Actor)
ren1.AddActor(bookshelf1TopActor)
ren1.AddActor(bookshelf1BottomActor)
ren1.AddActor(bookshelf1FrontActor)
ren1.AddActor(bookshelf1BackActor)
ren1.AddActor(bookshelf1LHSActor)
ren1.AddActor(bookshelf1RHSActor)
ren1.AddActor(bookshelf2TopActor)
ren1.AddActor(bookshelf2BottomActor)
ren1.AddActor(bookshelf2FrontActor)
ren1.AddActor(bookshelf2BackActor)
ren1.AddActor(bookshelf2LHSActor)
ren1.AddActor(bookshelf2RHSActor)
ren1.AddActor(windowActor)
ren1.AddActor(outletActor)
ren1.AddActor(inletActor)
ren1.AddActor(outlineActor)
ren1.AddActor(conesActor)
ren1.SetBackground(0.4, 0.4, 0.5)
aCamera = vtk.vtkCamera()
aCamera.SetClippingRange(0.7724, 39)
aCamera.SetFocalPoint(1.14798, 3.08416, 2.47187)
aCamera.SetPosition(-2.64683, -3.55525, 3.55848)
aCamera.SetViewUp(0.0511273, 0.132773, 0.989827)
aCamera.SetViewAngle(15.5033)
ren1.SetActiveCamera(aCamera)
renWin.SetSize(500, 300)
iren.Initialize()
#iren.Start()
|
ashray/VTK-EVM
|
Filters/Geometry/Testing/Python/officeStreamPoints.py
|
Python
|
bsd-3-clause
| 10,816
|
[
"VTK"
] |
a29fb65c7e070c46dfd8a722c86a0cfc9f165ab0e3f3f312a55bf69becfc1c61
|
import pybel
from datanator_query_python.util import mongo_util
import pymongo
import numpy as np
import multiprocessing as mp
import datanator.config.core
from datanator.util import chem_util
class CalcTanimoto(mongo_util.MongoUtil):
'''Calculating the Tanimoto similarity matrix
given two compound collections e.g.
ECMDB YMDB
'''
def __init__(self, cache_dirname=None, MongoDB=None, replicaSet=None, db=None,
verbose=True, max_entries=float('inf'), username=None,
password=None, authSource='admin'):
self.authSource = authSource
self.username = username
self.password = password
self.replicaSet = replicaSet
self.verbose = verbose
self.db = db
self.MongoDB = MongoDB
self.max_entries = max_entries
super().__init__(cache_dirname=cache_dirname, MongoDB=MongoDB, replicaSet=replicaSet,
db=db, verbose=verbose, max_entries=max_entries, username=username,
password=password, authSource=authSource)
log_handler = pybel.ob.OBMessageHandler()
log_handler.SetOutputLevel(0)
pybel.ob.obErrorLog.SetOutputLevel(0)
def get_tanimoto(self, mol1, mol2, str_format='inchi', rounding=3):
'''Calculates tanimoto coefficients between
two molecules, mol1 and mol2
Args:
mol1: molecule 1 in some format
mol2: molecule 2 in same format as molecule 1
str_format: format for molecular representation
supported formats are provided by Pybel
rounding: rounding of the final results
Return:
tani: rounded tanimoto coefficient
'''
try:
inchi = [mol1, mol2]
mols = [pybel.readstring(str_format, x) for x in inchi]
fps = [x.calcfp() for x in mols]
return round((fps[0] | fps[1]), rounding)
except TypeError:
return -1
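    # A minimal usage sketch (hypothetical connection settings, short InChI strings):
    #
    #   ct = CalcTanimoto(MongoDB='localhost', db='datanator')
    #   ct.get_tanimoto('InChI=1S/CH4/h1H4', 'InChI=1S/C2H6/c1-2/h1-2H3')
    #
    # returns the fingerprint Tanimoto coefficient rounded to 3 decimal places,
    # or -1 if either string cannot be parsed by pybel.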
def one_to_many(self, inchi, collection_str='metabolites_meta',
field='inchi', lookup='InChI_Key', num=100):
''' Calculate tanimoto coefficients between one
metabolite and the rest of the 'collection_str'
Args:
inchi: chosen chemical compound in InChI format
collection_str: collection in which comparisons are made
field: field that has the chemical structure
lookup: field that had been previous indexed
num: max number of compounds to be returned, sorted by tanimoto
Returns:
sorted_coeff: sorted numpy array of top num tanimoto coeff
sorted_inchi: sorted top num inchi
'''
col = self.db_obj[collection_str]
coeff_np = np.empty([0])
top_inchi = []
np_size = 0
projection = {field: 1, lookup: 1}
cursor = col.find({}, projection=projection)
count = col.count_documents({})
total = min(count, self.max_entries)
i = 0
while (np_size < num): # fill in first num tanimoto coefficients
mol2 = cursor[i][field]
hash2 = cursor[i][lookup]
tanimoto = self.get_tanimoto(inchi, mol2)
if tanimoto < 1:
coeff_np = np.append(coeff_np, tanimoto)
top_inchi.append(hash2)
np_size += 1
i += 1
else:
i +=1
coeff_min = np.amin(coeff_np)
min_index = np.argmin(coeff_np)
i = 0
j = 0
for doc in cursor[num:]: # iterate through the rest of the documents
if i > self.max_entries:
break
if self.verbose and j % 200 == 0:
print(' Calculating between given and doc {} out of {} in collection {}'.format(
j + num, total, collection_str))
mol2 = doc[field]
hash2 = doc[lookup]
tanimoto = self.get_tanimoto(inchi, mol2)
if tanimoto > coeff_min and tanimoto < 1:
np.put(coeff_np, min_index, tanimoto)
top_inchi[min_index] = hash2
# update min coeff information
coeff_min = np.amin(coeff_np)
min_index = np.argmin(coeff_np)
i += 1
j += 1
else:
j += 1
indices = np.argsort(coeff_np)
sorted_inchi = []
for x in (indices[::-1]):
sorted_inchi.append(top_inchi[x])
sorted_coeff = np.sort(coeff_np)[::-1]
return sorted_coeff, sorted_inchi
def many_to_many(self, collection_str1='metabolites_meta',
collection_str2='metabolites_meta', field1='inchi',
field2='inchi', lookup1='InChI_Key',
lookup2='InChI_Key', num=100):
''' Go through collection_str and assign each
compound top 'num' amount of most similar
compounds
Args:
collection_str1: collection in which compound is drawn
collection_str2: collection in which comparison is made
field1: field of interest in collection_str1
            field2: field of interest in collection_str2
num: number of most similar compound
batch_size: batch_size for each server round trip
'''
src = mongo_util.MongoUtil(
MongoDB=self.MongoDB,
username=self.username, password=self.password,
authSource=self.authSource)
db_obj = src.client[self.db]
final = db_obj[collection_str1]
projection = {'m2m_id':0, 'ymdb_id': 0, 'kinlaw_id': 0,
'reaction_participants': 0, 'synonyms': 0}
col = src.client["datanator"]["metabolites_meta"]
count = col.count_documents({})
total = min(count, self.max_entries)
''' The rest of the code in this function is to force
a cursor refresh every 'limit' number of documents
because no_cursor_timeout option in pymongo's find()
function is not working as intended
'''
def process_doc(doc, final, i, total = total, collection_str1 = collection_str1,
field1 = field1, lookup1 = lookup1, collection_str2 = collection_str2,
field2 = field2, lookup2 = lookup2):
# if 'similar_compounds_corrected' in doc:
# if self.verbose and i % 10 ==0:
# print('Skipping document {} out of {} in collection {}'.format(
# i, total, collection_str1))
# return
if i > self.max_entries:
return
if self.verbose and i % 1 == 0:
print('Going through document {} out of {} in collection {}'.format(
i, total, collection_str1))
print(doc[field1])
compound = doc[field1]
coeff, inchi_hashed = self.one_to_many(compound, lookup=lookup2,
collection_str=collection_str2, field=field2, num=num)
result = []
for a, b in zip(coeff, inchi_hashed):
dic = {}
dic[b] = a
result.append(dic)
final.update_one({lookup1: doc[lookup1]},
{'$set': {'similar_compounds_corrected': result}},
upsert=False)
limit = 100 # number of documents from the cursor to be stuffed into a list
sorted_field = lookup1 # indexed field used to sort cursor
i = 0
documents = list(col.find({}, projection = projection).sort(sorted_field, pymongo.ASCENDING).limit(limit))
for doc in documents:
process_doc(doc, final, i)
i += 1
is_last_batch = False
while not is_last_batch:
cursor = col.find({sorted_field: {'$gt': documents[-1][sorted_field]}}, projection = projection)
documents = list(cursor.sort(sorted_field, pymongo.ASCENDING).limit(limit))
is_last_batch = False if len(documents) == limit else True
for doc in documents:
process_doc(doc, final, i)
i += 1
def main():
db = 'datanator'
username = datanator.config.core.get_config()['datanator']['mongodb']['user']
password = datanator.config.core.get_config()['datanator']['mongodb']['password']
server = datanator.config.core.get_config()['datanator']['mongodb']['server']
port = datanator.config.core.get_config()['datanator']['mongodb']['port']
replSet = datanator.config.core.get_config()['datanator']['mongodb']['replSet']
manager = CalcTanimoto(
MongoDB=server, replicaSet=replSet, db=db,
verbose=True, password=password, username=username)
chem_manager = chem_util.ChemUtil()
# manager.many_to_many(field1 = 'inchi', field2 = 'inchi')
# one-time update for adp
inchi = 'InChI=1S/C10H15N5O10P2/c11-8-5-9(13-2-12-8)15(3-14-5)10-7(17)6(16)4(24-10)1-23-27(21,22)25-26(18,19)20/h2-4,6-7,10,16-17H,1H2,(H,21,22)(H2,11,12,13)(H2,18,19,20)/t4-,6-,7-,10-/m1/s1'
sorted_coeff, sorted_inchi = manager.one_to_many(inchi)
_, _, collection = manager.con_db('metabolites_meta')
for key, val in zip(sorted_inchi, sorted_coeff):
dic = {key: val}
collection.update_one({'inchi': inchi},
{'$push': {'similar_compounds': dic} })
if __name__ == '__main__':
main()
|
KarrLab/kinetic_datanator
|
datanator/util/calc_tanimoto.py
|
Python
|
mit
| 9,777
|
[
"Pybel"
] |
9b31c3a5d86538b79eafd6fc1edec61eadc46b04e2f9a8d7e9d46bcf38e923bc
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkImageEuclideanDistance(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkImageEuclideanDistance(), 'Processing.',
('vtkImageData',), ('vtkImageData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkImageEuclideanDistance.py
|
Python
|
bsd-3-clause
| 507
|
[
"VTK"
] |
b8c70f2a9a0ae9d20c605f5f92537c030cec40a4565ccd92482102e8c6b38575
|
# -*- coding: utf-8 -*-
# Copyright 2012 Jerry Peng
#
# Tomate is a time management tool inspired by the
# pomodoro technique(http://www.pomodorotechnique.com/).
#
# Tomate is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation,
# either version 3 of the License, or (at your option)
# any later version.
#
# Tomate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with Foobar. If not, see http://www.gnu.org/licenses/.
import random
_quote_src = '''
Better three hours too soon, than one minute too late. --William Shakespeare
Time is the wisest counselor of all. --Pericles
Time is the school in which we learn, time is the fire in which we burn. --Delmore Schwartz
Nothing is a waste of time if you use the experience wisely. --Rodin
Time as he grows old teaches many lessons. --Aeschylus
Histories make men wise. --Francis Bacon
Time is what we want most, but what we use worst. --William Penn
The common man is not concerned about the passage of time, the man of talent is driven by it. --Shoppenhauer
Time = life; therefore, waste your time and waste of your life, or master your time and master your life. --Alan Lakein
Don’t be fooled by the calendar. There are only as many days in the year as you make use of. One man gets only a week’s value out of a year while another man gets a full year’s value out of a week. --Charles Richards
The key is in not spending time, but in investing it. --Stephen R. Covey
Ordinary people think merely of spending time. Great people think of using it. --Author Unknown
Determine never to be idle. No person will have occasion to complain of the want of time who never loses any. It is wonderful how much can be done if we are always doing. --Thomas Jefferson
Make use of time, let not advantage slip. --William Shakespeare
This time, like all times, is a very good one, if we but know what to do with it. --Ralph Waldo Emerson
A man who dares to waste one hour of life has not discovered the value of life. --Charles Darwin
Dost thou love life? Then do not squander time, for that is the stuff life is made of. --Benjamin Franklin
Once you have mastered time, you will understand how true it is that most people overestimate what they can accomplish in a year – and underestimate what they can achieve in a decade! --Anthony Robbins
If you want to make good use of your time, you’ve got to know what’s most important and then give it all you’ve got. --Lee Iacocca
It’s not enough to be busy, so are the ants. The question is, what are we busy about? --Henry David Thoreau
Take care of the minutes and the hours will take care of themselves. --Lord Chesterfield
You’re writing the story of your life one moment at a time. --Doc Childre and Howard Martin
To do two things at once is to do neither. --Publius Syrus
One cannot manage too many affairs: like pumpkins in the water, one pops up while you try to hold down the other. --Chinese Proverb
Never let yesterday use up today. --Richard H. Nelson
I don’t think of the past. The only thing that matters is the everlasting present. --W. Somerset Maugham
It’s how we spend our time here and now, that really matters. If you are fed up with the way you have come to interact with time, change it. --Marcia Wieder
Realize that now, in this moment of time, you are creating. You are creating your next moment. That is what’s real. --Sara Paddison
The time for action is now. It’s never too late to do something. --Carl Sandburg
You cannot do a kindness too soon, for you never know how soon it will be too late. --Ralph Waldo Emerson
Whether it’s the best of times or the worst of times, it’s the only time we’ve got. --Art Buchwald
He lives long that lives well; and time misspent is not lived but lost. --Thomas Fuller
He who know most grieves most for wasted time. --Dante
Lost wealth may be replaced by industry, lost knowledge by study, lost health by temperance or medicine, but lost time is gone forever. --Samuel Smiles
Money, I can only gain or lose. But time I can only lose. So, I must spend it carefully. --Author Unknown
One thing you can’t recycle is wasted time. --Author Unknown
Lost time is never found again. --Proverb
All that really belongs to us is time; even he who has nothing else has that. --Baltasar Gracian
Time is the most valuable thing a man can spend. --Theophrastus
Time is money. --Benjamin Franklin
Gaining time is gaining everything in love, trade and war. --John Shebbeare
Until you value yourself, you will not value your time. Until you value your time, you will not do anything with it. --M. Scott Peck
Your greatest resource is your time. --Brian Tracy
You cannot kill time without injuring eternity. --Henry David Thoreau
Time is the most valuable thing a man can spend. --Laertius Diogenes
Time is at once the most valuable and the most perishable of all our possessions. --John Randolph
Time is really the only capital that any human being has, and the only thing he can’t afford to lose. --Thomas Edison
Until we can manage time, we can manage nothing else. --Peter F. Drucker
What may be done at any time will be done at no time. --Scottish Proverb
A wise person does at once, what a fool does at last. Both do the same thing; only at different times. --Baltasar Gracian
One worthwhile task carried to a successful conclusion is worth half-a-hundred half-finished tasks. --Malcolm S. Forbes
To think too long about doing a thing often becomes its undoing. --Eva Young
A year from now you will wish you had started today. --Karen Lamb
The surest way to be late is to have plenty of time. --Leo Kennedy
While we are postponing, life speeds by. --Seneca
You may delay, but time will not. --Benjamin Franklin
Never leave ’till tomorrow which you can do today. --Benjamin Franklin
You will never “find” time for anything. If you want time, you must make it. --Charles Bruxton
Don’t say you don’t have enough time. You have exactly the same number of hours per day that were given to Helen Keller, Pasteur, Michelangelo, Mother Teresa, Leonardo da Vinci, Thomas Jefferson, and Albert Einstein. --H. Jackson Brown
The bad news is time flies. The good news is you’re the pilot. --Michael Altshuler
Time is the coin of your life. It is the only coin you have, and only you can determine how it will be spent. Be careful lest you let other people spend it for you. --Carl Sandburg
I am definitely going to take a course on time management… just as soon as I can work it into my schedule. --Louis E. Boone
In truth, people can generally make time for what they choose to do; it is not really the time but the will that is lacking. --Sir John Lubbock
Those who make the worse use of their time are the first to complain of its shortness. --Jean De La Bruyere
The great dividing line between success and failure can be expressed in five words: “I did not have time.” --Franklin Field
'''
def _parse_quotes():
quotes = []
for l in _quote_src.splitlines():
if not l:
continue
p = l.find('--')
if p > 0:
quotes.append((l[:p].strip(), l[p+2:].strip()))
else:
quotes.append((l.strip(), 'Author Unknown'))
return tuple(quotes)
_quotes = _parse_quotes()
def random_quote():
return random.choice(_quotes)
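# random_quote() returns a (quote, author) tuple, e.g.
#   ("Time is money.", "Benjamin Franklin")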
if __name__ == '__main__':
print random_quote()
|
moonranger/tomate
|
tomate/timequotes.py
|
Python
|
gpl-3.0
| 7,739
|
[
"Brian"
] |
e170e08e63a02a94d9699b18d959ed0517cd52c8a91012d99e05a45735f83711
|
#!/usr/bin/env python
"""
An animated image
"""
from __future__ import (absolute_import, division, print_function)
import netCDF4 as nc4
import numpy as np
import pysgrid
from datetime import timedelta
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.animation import FFMpegWriter
from pysgrid.processing_2d import rotate_vectors, vector_sum
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
"""
~~HOW TO USE THIS~~
The four lines below are the primary 'controls'.
`url`: filename or URL of dataset
`lons,lats` mesh of points that cover the specified bounds
`maxslice` The number of slices from 0 to turn into an animation
`fps` The number of frames in between each data slice.
This is a WIP. Reader beware.
"""
# url = ('C:\Users\Jay.Hennen\Documents\Code\pygnome\py_gnome\scripts\script_curv_field\TBOFS.nc')
# lons, lats = np.mgrid[-82.8:-82.5:600j, 27.5:27.75:600j]
url = ('http://geoport-dev.whoi.edu/thredds/dodsC/clay/usgs/users/zdefne/run076/his/00_dir_roms_display.ncml') # noqa
lons, lats = np.mgrid[-74.38:-74.26:600j, 39.45:39.56:600j]
# lons, lats = np.mgrid[-82.8:-82.5:600j, 27.5:27.75:600j]
maxslice = 3
fps = 10
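# A hypothetical alternative configuration (illustrative values only):
# url = 'my_roms_history.nc'
# lons, lats = np.mgrid[-75.0:-74.5:400j, 39.0:39.4:400j]
# maxslice = 10   # animate the first ten time slices
# fps = 15        # frames rendered between consecutive data slices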
def interpolated_velocities(grid, points, ind, timeobj, tindex, u, v, depth=-1.):
'''
Finds velocities at the points at the time specified, interpolating in 2D
over the u and v grids to do so.
    :param tindex: fractional time index into the dataset
    :param points: a numpy array of points that you want to find interpolated velocities for
    :param ind: numpy array of cell indices of the points, if already known
:return: interpolated velocities at the specified points
'''
t_alphas = timeobj.ialphas(tindex)
t_index = int(np.floor(tindex))
mem = True
_hash = grid._hash_of_pts(points)
u0 = grid.interpolate_var_to_points(points,
grid.u,
slices=[t_index, -1],
memo=mem,
_hash=_hash)
u1 = grid.interpolate_var_to_points(points,
grid.u,
slices=[t_index + 1, -1],
memo=mem, _hash=_hash)
v0 = grid.interpolate_var_to_points(points,
grid.v,
slices=[t_index, -1],
memo=mem,
_hash=_hash)
v1 = grid.interpolate_var_to_points(points,
grid.v,
slices=[t_index + 1, -1],
memo=mem,
_hash=_hash)
u_vels = u0 + (u1 - u0) * t_alphas
v_vels = v0 + (v1 - v0) * t_alphas
return u_vels, v_vels
class Time(object):
def __init__(self, data, base_dt_str=None):
"""
:param data: A netCDF, biggus, or dask source for time data
:return:
"""
self.time = nc4.num2date(data[:], units=data.units)
def ialphas(self, index):
'''
given a floating point index between 0 and max index,
give interpolation alphas for that time
'''
        i0 = int(np.floor(index))
        i1 = int(np.ceil(index))
frac = index - i0
return frac
t0 = self.time[i0]
t1 = self.time[i1]
if i0 == i1:
return t0
else:
return t0 * frac + t1 * (1 - frac)
def time_str(self, index):
        i0 = int(np.floor(index))
        i1 = int(np.ceil(index))
frac = index - i0
t0 = self.time[i0]
t1 = self.time[i1]
time = t0 + timedelta(seconds=(t1 - t0).total_seconds() * frac)
return time.strftime('%c')
def f(time):
'''
time: float index
'''
    # interpolated_velocities returns (u, v) component arrays at the
    # fractional time index, so unpack them directly.
    u_rot, v_rot = interpolated_velocities(sgrid,
                                           points,
                                           ind,
                                           timeobj,
                                           time,
                                           sgrid.u,
                                           sgrid.v)
u_rot, v_rot = rotate_vectors(u_rot, v_rot, angles)
u_rot = u_rot.reshape(600, -1)
v_rot = v_rot.reshape(600, -1)
uv_vector_sum = vector_sum(u_rot, v_rot)
return uv_vector_sum
def make_map(projection=ccrs.PlateCarree(), figsize=(9, 9)):
fig, ax = plt.subplots(figsize=figsize,
subplot_kw=dict(projection=projection))
gl = ax.gridlines(draw_labels=True)
gl.xlabels_top = gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
return fig, ax
nc = nc4.Dataset(url)
timeobj = sgrid = None
if ('ocean_time' in nc.variables.keys()):
timeobj = Time(nc['ocean_time'])
else:
timeobj = Time(nc['time'])
if 'grid' in nc.variables.keys():
sgrid = pysgrid.load_grid(nc)
else:
sgrid = pysgrid.SGrid(node_lon=nc['lon_psi'],
node_lat=nc['lat_psi'],
edge1_lon=nc['lon_u'],
edge1_lat=nc['lat_u'],
edge2_lon=nc['lon_v'],
edge2_lat=nc['lat_v'],
)
sgrid.u = pysgrid.variables.SGridVariable(data=nc['u'])
sgrid.v = pysgrid.variables.SGridVariable(data=nc['v'])
sgrid.angles = pysgrid.variables.SGridVariable(data=nc['angle'])
points = np.stack((lons, lats), axis=-1).reshape(-1, 2)
ind = sgrid.locate_faces(points)
ang_ind = ind + [1, 1]
angles = sgrid.angles[:][ang_ind[:, 0], ang_ind[:, 1]]
# ims is a list of lists, each row is a list of artists to draw in the
# current frame; here we are just animating one artist, the image, in
# each frame
fig, ax = make_map()
print(fig)
print(ax)
index = 0
ax.coastlines('10m')
t = np.linspace(0, maxslice, maxslice * fps)
cs = qv = tl = None
time_str = timeobj.time_str(0)
tl = ax.text(0, 1, time_str, bbox=dict(
facecolor='white', alpha=0.8), transform=ax.transAxes)
def gen_map(k):
global t, index, cs, qv, tl, timeobj
tindex = t[index]
if cs is not None:
cs.remove()
qv.remove()
time_str = timeobj.time_str(tindex)
tl.set_text(time_str)
mscale = 1
vscale = 15
scale = 0.04
lon_data = lons
lat_data = lats
print(tindex)
print(time_str)
u_rot, v_rot = interpolated_velocities(sgrid, points, ind, timeobj, tindex, sgrid.u, sgrid.v)
u_rot, v_rot = rotate_vectors(u_rot, v_rot, angles)
u_rot = u_rot.reshape(600, -1)
v_rot = v_rot.reshape(600, -1)
uv_vector_sum = vector_sum(u_rot, v_rot)
kw = dict(scale=1.0 / scale, pivot='middle', width=0.003, color='black')
cs = plt.pcolormesh(lon_data[::mscale, ::mscale],
lat_data[::mscale, ::mscale],
uv_vector_sum[::mscale, ::mscale], zorder=1, cmap=plt.cm.rainbow)
qv = plt.quiver(lon_data[::vscale, ::vscale], lat_data[::vscale, ::vscale],
u_rot[::vscale, ::vscale], v_rot[::vscale, ::vscale], zorder=2, **kw)
index += 1
return cs, qv, tl
print('creating animation')
ani = animation.FuncAnimation(fig,
gen_map,
frames=maxslice * fps - 1,
interval=100,
blit=True,
repeat=False)
writer = FFMpegWriter(fps=fps, bitrate=1500)
# plt.show()
print('saving')
ani.save('currents_movie.mp4', writer=writer)
print('done')
|
NOAA-ORR-ERD/pysgrid
|
demos/matlabanim.py
|
Python
|
bsd-3-clause
| 7,903
|
[
"NetCDF"
] |
e82e65ec067b287f33ad759d79af1d5bdd69ae422054629661c41cc6cdea969a
|
#===============================================================================
# LICENSE XOT-Framework - CC BY-NC-ND
#===============================================================================
# This work is licenced under the Creative Commons
# Attribution-Non-Commercial-No Derivative Works 3.0 Unported License. To view a
# copy of this licence, visit http://creativecommons.org/licenses/by-nc-nd/3.0/
# or send a letter to Creative Commons, 171 Second Street, Suite 300,
# San Francisco, California 94105, USA.
#===============================================================================
import sys
import os
if __name__ == "__main__":
# if settings.py is run manually, we should show addon settings.
if hasattr(sys, "argv") and len(sys.argv) > 1:
print sys.argv
# show the requested settings
addonId = sys.argv[-1]
callerPath = sys.argv[0]
# if callerPath is an XBox path, we need to use paths and point to
# the correct \channels\<scriptname>.channel.<name>\ folder.
# XBMC4Xbox Log: NOTICE: ['Q:\\scripts\\XOT-Uzg.v3\\resources\\libs\\settings.py', 'net.rieter.xot']
if (callerPath.lower().startswith("q:")):
isXbox = True
else:
isXbox = False
if (not isXbox):
import xbmcaddon
settings = xbmcaddon.Addon(addonId)
else:
import xbmc
print "Handling Xbox stuff"
callerPath = callerPath.replace("\\\\", "\\")
print "CallerPath: %s" % (callerPath,)
scriptPath = callerPath.replace("\\resources\\libs\\settings.py", "")
print "ScriptPath: %s" % (scriptPath,)
scriptName = os.path.split(scriptPath)[-1]
print "ScriptName: %s" % (scriptName,)
channelName = addonId[addonId.find(".channel.") + 1:]
print "ChannelName: %s" % (channelName,)
# 22:25:13 M: 39600128 NOTICE: CallerPath: Q:\scripts\XOT-Uzg.v3\resources\libs\settings.py
# 22:25:13 M: 39600128 NOTICE: ScriptPath: Q:\scripts\XOT-Uzg.v3
# 22:25:13 M: 39600128 NOTICE: ScriptName: XOT-Uzg.v3
channelPath = os.path.join(scriptPath, "channels", "%s.%s" % (scriptName, channelName))
settings = xbmc.Settings(path=channelName)
# now open the settings
settings.openSettings()
# perhaps re-open the XOT settings?
        # exit now
sys.exit(0)
print "We should not get here."
import re
from logger import Logger
from helpers import database
def CleanupXml(xmlDoc):
"""Cleans up XML to make it look pretty
Arguments:
xmlDoc : string - the XML to cleanup
"""
# cleanup
prettyXml = xmlDoc.toprettyxml()
# remove not needed lines with only whitespaces
prettyXml = re.sub("(?m)^\s+[\n\r]", "", prettyXml,)
prettyXml = re.sub("[\n\r]+\t+([^<\t]+)[\n\r]+\t+", "\g<1>", prettyXml)
return prettyXml
def LoadFavorites(channel):
"""Reads the channel favorites into items.
Arguments:
channel : Channel - The channel for which the favorites need to be loaded.
Returns:
list of MediaItems that were marked as favorites.
"""
    items = []
    try:
        db = database.DatabaseHandler()
        items = db.LoadFavorites(channel)
        for item in items:
            item.icon = channel.icon
    except:
        Logger.Error("Settings :: Error loading favorites", exc_info=True)
    return items
def AddToFavorites(item, channel):
"""Adds an items to the favorites
Arguments:
item : MediaItem - The MediaItem to add as favorite.
channel : Channel - The channel for which the favorites need to be loaded.
"""
if item.url == "":
Logger.Warning("Settings :: Cannot add favorite without URL")
return
try:
db = database.DatabaseHandler()
db.AddFavorite(item.name, item.url, channel)
except:
Logger.Error("Settings :: Error adding favorites", exc_info=True)
def RemoveFromFavorites(item, channel):
"""Removes an item from the favorites
Arguments:
item : MediaItem - The MediaItem to be removed
channel : Channel - The channel for which it needs to be removed.
"""
try:
db = database.DatabaseHandler()
db.DeleteFavorites(item.name, item.url, channel)
except:
Logger.Error("Settings :: Error removing from favorites", exc_info=True)
return
|
SMALLplayer/smallplayer-image-creator
|
storage/.xbmc/addons/net.rieter.xot.smallplayer/resources/libs/settings.py
|
Python
|
gpl-2.0
| 4,602
|
[
"VisIt"
] |
df7cf8b18fa6d7223929e8f4529d86f8064d6db59c9d593029e1a0d98688a9ce
|
"""
:mod: Pfn
.. module: Pfn
:synopsis: pfn URI (un)parsing
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
# # imports
import os
from urllib import parse
# # from DIRAC
from DIRAC import S_OK, S_ERROR, gLogger
def pfnunparse(pfnDict, srmSpecific=True):
"""Wrapper for backward compatibility
Redirect either to the old hand made style of unparsing
the pfn, which works for srm, or to the standard one
which seems to work for the rest
:param srmSpecific: use the srm specific parser (default True)
"""
if srmSpecific:
return srm_pfnunparse(pfnDict)
return default_pfnunparse(pfnDict)
def srm_pfnunparse(pfnDict):
"""
Create PFN URI from pfnDict
:param dict pfnDict:
"""
# # make sure all keys are in
allDict = dict.fromkeys(["Protocol", "Host", "Port", "WSUrl", "Path", "FileName"], "")
if not isinstance(pfnDict, dict):
return S_ERROR("pfnunparse: wrong type for pfnDict argument, expected a dict, got %s" % type(pfnDict))
allDict.update(pfnDict)
pfnDict = allDict
# # c
# # /a/b/c
filePath = os.path.normpath("/" + pfnDict["Path"] + "/" + pfnDict["FileName"]).replace("//", "/")
# # host
uri = pfnDict["Host"]
if pfnDict["Host"]:
if pfnDict["Port"]:
# host:port
uri = "%s:%s" % (pfnDict["Host"], pfnDict["Port"])
if pfnDict["WSUrl"]:
if "?" in pfnDict["WSUrl"] and "=" in pfnDict["WSUrl"]:
# host/wsurl
# host:port/wsurl
uri = "%s%s" % (uri, pfnDict["WSUrl"])
else:
# host/wsurl
# host:port/wsurl
uri = "%s%s?=" % (uri, pfnDict["WSUrl"])
if pfnDict["Protocol"]:
if uri:
# proto://host
# proto://host:port
# proto://host:port/wsurl
uri = "%s://%s" % (pfnDict["Protocol"], uri)
else:
# proto:
uri = "%s:" % pfnDict["Protocol"]
pfn = "%s%s" % (uri, filePath)
# c
# /a/b/c
# proto:/a/b/c
# proto://host/a/b/c
# proto://host:port/a/b/c
# proto://host:port/wsurl/a/b/c
return S_OK(pfn)
def default_pfnunparse(pfnDict):
"""
Create PFN URI from pfnDict
:param dict pfnDict:
"""
try:
if not isinstance(pfnDict, dict):
return S_ERROR("pfnunparse: wrong type for pfnDict argument, expected a dict, got %s" % type(pfnDict))
allDict = dict.fromkeys(["Protocol", "Host", "Port", "Path", "FileName", "Options"], "")
allDict.update(pfnDict)
scheme = allDict["Protocol"]
netloc = allDict["Host"]
if allDict["Port"]:
netloc += ":%s" % allDict["Port"]
path = os.path.join(allDict["Path"], allDict["FileName"])
query = allDict["Options"]
pr = parse.ParseResult(scheme=scheme, netloc=netloc, path=path, params="", query=query, fragment="")
pfn = pr.geturl()
return S_OK(pfn)
except Exception as e: # pylint: disable=broad-except
errStr = "Pfn.default_pfnunparse: Exception while unparsing pfn: %s" % pfnDict
gLogger.exception(errStr, lException=e)
return S_ERROR(errStr)
def pfnparse(pfn, srmSpecific=True):
"""Wrapper for backward compatibility
Redirect either to the old hand made style of parsing
the pfn, which works for srm, or to the standard one
which seems to work for the rest
:param srmSpecific: use the srm specific parser (default True)
"""
if srmSpecific:
return srm_pfnparse(pfn)
return default_pfnparse(pfn)
def srm_pfnparse(pfn):
"""
Parse pfn and save all bits of information into dictionary
:param str pfn: pfn string
"""
if not pfn:
return S_ERROR("wrong 'pfn' argument value in function call, expected non-empty string, got %s" % str(pfn))
pfnDict = dict.fromkeys(["Protocol", "Host", "Port", "WSUrl", "Path", "FileName"], "")
try:
if ":" not in pfn:
# pfn = /a/b/c
pfnDict["Path"] = os.path.dirname(pfn)
pfnDict["FileName"] = os.path.basename(pfn)
else:
# pfn = protocol:/a/b/c
# pfn = protocol://host/a/b/c
# pfn = protocol://host:port/a/b/c
# pfn = protocol://host:port/wsurl?=/a/b/c
pfnDict["Protocol"] = pfn[0 : pfn.index(":")]
# # remove protocol:
pfn = pfn[len(pfnDict["Protocol"]) :]
# # remove :// or :
pfn = pfn[3:] if pfn.startswith("://") else pfn[1:]
if pfn.startswith("/"):
# # /a/b/c
pfnDict["Path"] = os.path.dirname(pfn)
pfnDict["FileName"] = os.path.basename(pfn)
else:
# # host/a/b/c
# # host:port/a/b/c
# # host:port/wsurl?=/a/b/c
if ":" not in pfn:
# # host/a/b/c
pfnDict["Host"] = pfn[0 : pfn.index("/")]
pfn = pfn[len(pfnDict["Host"]) :]
pfnDict["Path"] = os.path.dirname(pfn)
pfnDict["FileName"] = os.path.basename(pfn)
else:
# # host:port/a/b/c
# # host:port/wsurl?=/a/b/c
pfnDict["Host"] = pfn[0 : pfn.index(":")]
# # port/a/b/c
# # port/wsurl?=/a/b/c
pfn = pfn[len(pfnDict["Host"]) + 1 :]
pfnDict["Port"] = pfn[0 : pfn.index("/")]
# # /a/b/c
# # /wsurl?=/a/b/c
pfn = pfn[len(pfnDict["Port"]) :]
WSUrl = pfn.find("?")
WSUrlEnd = pfn.find("=")
if WSUrl == -1 and WSUrlEnd == -1:
# # /a/b/c
pfnDict["Path"] = os.path.dirname(pfn)
pfnDict["FileName"] = os.path.basename(pfn)
else:
# # /wsurl?blah=/a/b/c
pfnDict["WSUrl"] = pfn[0 : WSUrlEnd + 1]
# # /a/b/c
pfn = pfn[len(pfnDict["WSUrl"]) :]
pfnDict["Path"] = os.path.dirname(pfn)
pfnDict["FileName"] = os.path.basename(pfn)
return S_OK(pfnDict)
except Exception: # pylint: disable=broad-except
errStr = "Pfn.srm_pfnparse: Exception while parsing pfn: " + str(pfn)
gLogger.exception(errStr)
return S_ERROR(errStr)
def default_pfnparse(pfn):
"""
Parse pfn and save all bits of information into dictionary
:param str pfn: pfn string
"""
if not pfn:
return S_ERROR("wrong 'pfn' argument value in function call, expected non-empty string, got %s" % str(pfn))
pfnDict = dict.fromkeys(["Protocol", "Host", "Port", "WSUrl", "Path", "FileName"], "")
try:
parsed = parse.urlparse(pfn)
pfnDict["Protocol"] = parsed.scheme
if ":" in parsed.netloc:
pfnDict["Host"], pfnDict["Port"] = parsed.netloc.split(":")
else:
pfnDict["Host"] = parsed.netloc
pfnDict["Path"] = os.path.dirname(parsed.path)
pfnDict["FileName"] = os.path.basename(parsed.path)
if parsed.query:
pfnDict["Options"] = parsed.query
return S_OK(pfnDict)
except Exception as e: # pylint: disable=broad-except
errStr = "Pfn.default_pfnparse: Exception while parsing pfn: " + str(pfn)
gLogger.exception(errStr, lException=e)
return S_ERROR(errStr)
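# A minimal usage sketch, not part of the original module: it assumes a DIRAC
# installation (so the S_OK/S_ERROR imports above resolve) and uses a made-up
# SRM URI purely for illustration.
if __name__ == "__main__":
    res = srm_pfnparse("srm://host.example.org:8446/srm/managerv2?SFN=/some/dir/file.txt")
    if res["OK"]:
        pfnDict = res["Value"]
        # pfnDict now carries Protocol, Host, Port, WSUrl, Path and FileName
        print(srm_pfnunparse(pfnDict)["Value"])  # round-trips back to the original URI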
|
ic-hep/DIRAC
|
src/DIRAC/Core/Utilities/Pfn.py
|
Python
|
gpl-3.0
| 7,782
|
[
"DIRAC"
] |
cfb7d31058a385e14b30ebadaea79d740e12fa78b4f1b17dcd017785e32640b8
|
# $HeadURL$
__RCSID__ = "$Id$"
from DIRAC.FrameworkSystem.Client.Logger import Logger
class SubSystemLogger( Logger ):
def __init__( self, subName, masterLogger, child = True ):
Logger.__init__( self )
self.__child = child
self._minLevel = masterLogger._minLevel
for attrName in dir( masterLogger ):
attrValue = getattr( masterLogger, attrName )
if isinstance( attrValue, basestring ):
setattr( self, attrName, attrValue )
self.__masterLogger = masterLogger
self._subName = subName
def getSubName(self):
"""
Return the name of the sublogger
"""
return self._subName
def processMessage( self, messageObject ):
if self.__child:
messageObject.setSubSystemName( self._subName )
else:
messageObject.setSystemName( self._subName )
self.__masterLogger.processMessage( messageObject )
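# A minimal usage sketch (illustration only; "MyAgent" is a hypothetical name
# and masterLogger must be an existing DIRAC Logger instance):
#   subLogger = SubSystemLogger("MyAgent", masterLogger)
#   subLogger.info("this message is tagged with the MyAgent subsystem name")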
|
Andrew-McNab-UK/DIRAC
|
FrameworkSystem/private/logging/SubSystemLogger.py
|
Python
|
gpl-3.0
| 872
|
[
"DIRAC"
] |
51a2a2dbd5b9a9f392b519b5645e3673e9a0ce280d7248a6a249f5ba18806dd3
|
#%% DEMO 03: Generate sample data and add realistic CT noise to it.
#
# This demo will show how to generate sample data for image reconstruction
#
#
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# This file is part of the TIGRE Toolbox
#
# Copyright (c) 2015, University of Bath and
# CERN-European Organization for Nuclear Research
# All rights reserved.
#
# License: Open Source under BSD.
# See the full license at
# https://github.com/CERN/TIGRE/blob/master/LICENSE
#
# Contact: tigre.toolbox@gmail.com
# Codes: https://github.com/CERN/TIGRE/
# Coded by: Ander Biguri
# --------------------------------------------------------------------------
#%%
import tigre
import numpy as np
from tigre.utilities import sample_loader
from tigre.utilities import CTnoise
geo = tigre.geometry_default(high_resolution=False)
#%% Define angles of projection and load phantom image
# define projection angles (in radians)
angles = np.linspace(0, 2 * np.pi, 50)
# load phantom image
head = sample_loader.load_head_phantom(geo.nVoxel)
# Simulate forward projection.
# To match with mathematical notation, the projection operation is called Ax
projections = tigre.Ax(head, geo, angles)
# Add realistic noise. Adds photon scattering noise ('Poisson') and
# electronic noise of the detector ('Gaussian').
#
# 'Poisson' is related to the maximum photon count in the detector. 1e5 is
# a standard clinical number, reduce it for more noise
# 'Gaussian' is related to possible electronic noise in the detector. mean
# of 0 and std of 10 is common in clinical scenarios. Increase std for more
# noise.
noise_projections = CTnoise.add(projections, Poisson=1e5, Gaussian=np.array([0, 10]))
#%% Plot Projections
tigre.plotproj(projections)
# plot noise
tigre.plotproj(projections - noise_projections)
|
CERN/TIGRE
|
Python/demos/d03_generateData.py
|
Python
|
bsd-3-clause
| 2,012
|
[
"Gaussian"
] |
be23e811720fbc937937cea7179de54749bd09033d7f923af614a3ee9317fab0
|
#
# Copyright (c) Brian C. Welch. www.brianwelch.se
# All rights reserved.
# Licensed under the MIT License.
# Persons are free to reproduce this code provided
# they do not remove this header
# + + + + + + + + + + + + + + + +
def binary_calc(digit_in):
    '''Convert a non-negative integer into its binary representation (a string).'''
binary_digit1 = []
output = ""
if (digit_in == 0):
        output = "0"  # keep the return type consistent (always a string)
else:
while digit_in > 0:
binar = digit_in%2
binary_digit1 += str(binar)
digit_in = int(digit_in / 2)
binary_length = int(len(binary_digit1))
while binary_length > 0:
output += binary_digit1[binary_length - 1]
binary_length = binary_length - 1
return output
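# A minimal usage sketch, not part of the original snippet; the inputs are
# hypothetical and only show that the digits come back most significant first.
if __name__ == "__main__":
    for n in (0, 5, 12):
        print(binary_calc(n))  # prints 0, 101 and 1100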
|
brian-welch/Code-Snippets
|
binary_number_convertor.py
|
Python
|
mit
| 739
|
[
"Brian"
] |
cb73206dfce5a18d9789c9c65ca77d0f96157d9487c58d1e4379a550a9a7c2cc
|
from __future__ import absolute_import
import numpy as nm
import sfepy.discrete.fem.periodic as per
from sfepy.discrete.fem.mesh import Mesh
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson
from sfepy.homogenization.utils import define_box_regions
import sfepy.homogenization.coefs_base as cb
from sfepy import data_dir
# material function
def get_mat(coors, mode, pb):
if mode == 'qp':
cnf = pb.conf
# get material coefficients
if hasattr(cnf, 'opt_data'):
# from optim.
E_f, nu_f, E_m, nu_m = cnf.opt_data['mat_params']
else:
# given values
E_f, nu_f, E_m, nu_m = 160.e9, 0.28, 5.e9, 0.45
nqp = coors.shape[0]
nel = pb.domain.mesh.n_el
nqpe = nqp // nel
out = nm.zeros((nqp, 6, 6), dtype=nm.float64)
# set values - matrix
D_m = stiffness_from_youngpoisson(3, E_m, nu_m)
Ym = pb.domain.regions['Ym'].get_cells()
idx0 = (nm.arange(nqpe)[:,nm.newaxis] * nm.ones((1, Ym.shape[0]),
dtype=nm.int32)).T.flatten()
idxs = (Ym[:,nm.newaxis] * nm.ones((1, nqpe),
dtype=nm.int32)).flatten() * nqpe
out[idxs + idx0,...] = D_m
# set values - fiber
D_f = stiffness_from_youngpoisson(3, E_f, nu_f)
Yf = pb.domain.regions['Yf'].get_cells()
idx0 = (nm.arange(nqpe)[:,nm.newaxis] * nm.ones((1, Yf.shape[0]),
dtype=nm.int32)).T.flatten()
idxs = (Yf[:,nm.newaxis] * nm.ones((1, nqpe),
dtype=nm.int32)).flatten() * nqpe
out[idxs + idx0,...] = D_f
return {'D': out}
def optimization_hook(pb):
cnf = pb.conf
out = []
yield pb, out
if hasattr(cnf, 'opt_data'):
# store homogenized tensor
pb.conf.opt_data['D_homog'] = out[-1].D.copy()
yield None
def define(is_opt=False):
filename_mesh = data_dir + '/meshes/3d/matrix_fiber_rand.vtk'
mesh = Mesh.from_file(filename_mesh)
bbox = mesh.get_bounding_box()
regions = {
'Y' : 'all',
'Ym' : ('cells of group 7', 'cell'),
'Yf' : ('r.Y -c r.Ym', 'cell'),
}
regions.update(define_box_regions(3, bbox[0], bbox[1]))
functions = {
'get_mat': (lambda ts, coors, mode=None, problem=None, **kwargs:
get_mat(coors, mode, problem),),
'match_x_plane' : (per.match_x_plane,),
'match_y_plane' : (per.match_y_plane,),
'match_z_plane' : (per.match_z_plane,),
}
materials = {
'mat': 'get_mat',
}
fields = {
'corrector' : ('real', 3, 'Y', 1),
}
variables = {
'u': ('unknown field', 'corrector'),
'v': ('test field', 'corrector', 'u'),
'Pi': ('parameter field', 'corrector', 'u'),
'Pi1': ('parameter field', 'corrector', '(set-to-None)'),
'Pi2': ('parameter field', 'corrector', '(set-to-None)'),
}
ebcs = {
'fixed_u' : ('Corners', {'u.all' : 0.0}),
}
epbcs = {
'periodic_x' : (['Left', 'Right'], {'u.all' : 'u.all'}, 'match_x_plane'),
'periodic_y' : (['Near', 'Far'], {'u.all' : 'u.all'}, 'match_y_plane'),
'periodic_z' : (['Top', 'Bottom'], {'u.all' : 'u.all'}, 'match_z_plane'),
}
all_periodic = ['periodic_%s' % ii for ii in ['x', 'y', 'z'][:3]]
options = {
'coefs': 'coefs',
'requirements': 'requirements',
'volume': { 'variables' : ['u'], 'expression' : 'ev_volume.5.Y( u )' },
'output_dir': 'output',
'coefs_filename': 'coefs_le',
}
equation_corrs = {
'balance_of_forces':
"""dw_lin_elastic.5.Y(mat.D, v, u)
= - dw_lin_elastic.5.Y(mat.D, v, Pi)"""
}
coefs = {
'D' : {
'requires' : ['pis', 'corrs_rs'],
'expression' : 'dw_lin_elastic.5.Y(mat.D, Pi1, Pi2 )',
'set_variables': [('Pi1', ('pis', 'corrs_rs'), 'u'),
('Pi2', ('pis', 'corrs_rs'), 'u')],
'class' : cb.CoefSymSym,
},
'vol': {
'regions': ['Ym', 'Yf'],
'expression': 'ev_volume.5.%s(u)',
'class': cb.VolumeFractions,
},
'filenames' : {},
}
requirements = {
'pis' : {
'variables' : ['u'],
'class' : cb.ShapeDimDim,
},
'corrs_rs' : {
'requires' : ['pis'],
'ebcs' : ['fixed_u'],
'epbcs' : all_periodic,
'equations' : equation_corrs,
'set_variables' : [('Pi', 'pis', 'u')],
'class' : cb.CorrDimDim,
'save_name' : 'corrs_le',
},
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-4,
'problem': 'linear',
})
}
if is_opt:
options.update({
'parametric_hook': 'optimization_hook',
'float_format': '%.16e',
})
return locals()
|
sfepy/sfepy
|
examples/homogenization/homogenization_opt.py
|
Python
|
bsd-3-clause
| 5,071
|
[
"VTK"
] |
48888d9e981a943b1db547c983150d377065bb08aa62a6aeb69d00075e6c9d29
|
#!/usr/bin/env python3
###########
#
# Created by Bruno Costa @ITQB
# 24/04/2017
# This allows the extraction of specific coordinates from a fasta file
#
############
import argparse
import re
parser = argparse.ArgumentParser(description='This allows the extraction of specific coordinates from a fasta file')
## blast result, output, cutoff,
#parser.add_argument('--flag', type=str, nargs=1, metavar='', dest='', required=True, help='')
parser.add_argument('--fa', type=str, metavar='input file', dest='input', required=True, help='Input fasta file.')
parser.add_argument('--seq', type=str, metavar='Sequence', dest='seq', required=False, help='Sequence selector.')
parser.add_argument('--wildSeq', type=str, metavar='Sequence', dest='wildSeq', required=False, help='Sequence selector with wild-cards.')
parser.add_argument('--cStart', type=int, metavar='Coordinate', dest='cStart', required=True, help='Coordinate start.')
parser.add_argument('--cStop', type=int, metavar='Coordinate', dest='cStop', required=True, help='Coordinate end.')
args = parser.parse_args()
fasta_file=args.input
sequence=args.seq
Start=args.cStart
Stop=args.cStop
wildSequence=args.wildSeq
def FASTA_parser(fasta_file):
"""Franciso P.Martins - From https://github.com/StuntsPT/4Pipe4/blob/master/pipeutils.py
Parse, convert and return fasta files into a dict like: 'name':'seq'."""
fasta = open(fasta_file, 'r')
d = {}
for lines in fasta:
if lines.startswith('>'):
name = lines[1:].strip()
d[name] = ''
else:
d[name] += lines.strip().upper()
fasta.close()
return d
fasta=FASTA_parser(fasta_file)
if(sequence is not None):
print("Using provided sequence")
print(">"+sequence+"\n"+fasta[sequence][Start:Stop])
elif(wildSequence is not None):
print("Using wild-card sequence")
for seq in fasta.keys():
if(re.match(wildSequence,seq)):
print(">"+seq+"\n"+fasta[seq][Start:Stop])
else:
#Stop
print("No sequence provided - Splice done on all sequences")
for seq in fasta.keys():
print(">"+seq+"\n"+fasta[seq][Start:Stop])
|
netbofia/Fasta_extractor
|
splice_fasta.py
|
Python
|
mit
| 2,195
|
[
"BLAST"
] |
cf54e41f06b8b3c89add9ca05b333b110fd279c457ec66e20c677e92bfed7739
|
""" codecs -- Python Codec Registry, API and helpers.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
""" # "
import __builtin__, sys
# ## Registry and builtin stateless codec functions
try:
from _codecs import *
except ImportError, why:
raise SystemError('Failed to load the builtin codecs: %s' % why)
__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
"BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
"BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE",
"BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE",
"strict_errors", "ignore_errors", "replace_errors",
"xmlcharrefreplace_errors",
"register_error", "lookup_error"]
### Constants
#
# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF)
# and its possible byte string values
# for UTF8/UTF16/UTF32 output and little/big endian machines
#
# UTF-8
BOM_UTF8 = '\xef\xbb\xbf'
# UTF-16, little endian
BOM_LE = BOM_UTF16_LE = '\xff\xfe'
# UTF-16, big endian
BOM_BE = BOM_UTF16_BE = '\xfe\xff'
# UTF-32, little endian
BOM_UTF32_LE = '\xff\xfe\x00\x00'
# UTF-32, big endian
BOM_UTF32_BE = '\x00\x00\xfe\xff'
if sys.byteorder == 'little':
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_LE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_LE
else:
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_BE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_BE
# Old broken names (don't use in new code)
BOM32_LE = BOM_UTF16_LE
BOM32_BE = BOM_UTF16_BE
BOM64_LE = BOM_UTF32_LE
BOM64_BE = BOM_UTF32_BE
### Codec base classes (defining the API)
class CodecInfo(tuple):
def __new__(cls, encode, decode, streamreader=None, streamwriter=None,
incrementalencoder=None, incrementaldecoder=None, name=None):
self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter))
self.name = name
self.encode = encode
self.decode = decode
self.incrementalencoder = incrementalencoder
self.incrementaldecoder = incrementaldecoder
self.streamwriter = streamwriter
self.streamreader = streamreader
return self
def __repr__(self):
return "<%s.%s object for encoding %s at 0x%x>" % (
self.__class__.__module__, self.__class__.__name__, self.name, id(self))
class Codec:
""" Defines the interface for stateless encoders/decoders.
The .encode()/.decode() methods may use different error
handling schemes by providing the errors argument. These
string values are predefined:
'strict' - raise a ValueError error (or a subclass)
'ignore' - ignore the character and continue with the next
'replace' - replace with a suitable replacement character;
Python will use the official U+FFFD REPLACEMENT
CHARACTER for the builtin Unicode codecs on
decoding and '?' on encoding.
'xmlcharrefreplace' - Replace with the appropriate XML
character reference (only for encoding).
'backslashreplace' - Replace with backslashed escape sequences
(only for encoding).
The set of allowed values can be extended via register_error.
"""
def encode(self, input, errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamCodec for codecs which have to keep state in order to
make encoding/decoding efficient.
The encoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
def decode(self, input, errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamCodec for codecs which have to keep state in order to
make encoding/decoding efficient.
The decoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
class IncrementalEncoder(object):
"""
An IncrementalEncoder encodes an input in multiple steps. The input can be
passed piece by piece to the encode() method. The IncrementalEncoder remembers
the state of the Encoding process between calls to encode().
"""
def __init__(self, errors='strict'):
"""
Creates an IncrementalEncoder instance.
The IncrementalEncoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
self.buffer = ""
def encode(self, input, final=False):
"""
Encodes input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Resets the encoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the encoder.
"""
return 0
def setstate(self, state):
"""
Set the current state of the encoder. state must have been
returned by getstate().
"""
class BufferedIncrementalEncoder(IncrementalEncoder):
"""
This subclass of IncrementalEncoder can be used as the baseclass for an
incremental encoder if the encoder must keep some of the output in a
buffer between calls to encode().
"""
def __init__(self, errors='strict'):
IncrementalEncoder.__init__(self, errors)
self.buffer = "" # unencoded input that is kept between calls to encode()
def _buffer_encode(self, input, errors, final):
# Overwrite this method in subclasses: It must encode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def encode(self, input, final=False):
# encode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_encode(data, self.errors, final)
# keep unencoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalEncoder.reset(self)
self.buffer = ""
def getstate(self):
return self.buffer or 0
def setstate(self, state):
self.buffer = state or ""
class IncrementalDecoder(object):
"""
An IncrementalDecoder decodes an input in multiple steps. The input can be
passed piece by piece to the decode() method. The IncrementalDecoder
remembers the state of the decoding process between calls to decode().
"""
def __init__(self, errors='strict'):
"""
Creates a IncrementalDecoder instance.
The IncrementalDecoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
def decode(self, input, final=False):
"""
Decodes input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Resets the decoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the decoder.
This must be a (buffered_input, additional_state_info) tuple.
buffered_input must be a bytes object containing bytes that
were passed to decode() that have not yet been converted.
additional_state_info must be a non-negative integer
representing the state of the decoder WITHOUT yet having
processed the contents of buffered_input. In the initial state
and after reset(), getstate() must return (b"", 0).
"""
return (b"", 0)
def setstate(self, state):
"""
Set the current state of the decoder.
state must have been returned by getstate(). The effect of
setstate((b"", 0)) must be equivalent to reset().
"""
class BufferedIncrementalDecoder(IncrementalDecoder):
"""
This subclass of IncrementalDecoder can be used as the baseclass for an
incremental decoder if the decoder must be able to handle incomplete byte
sequences.
"""
def __init__(self, errors='strict'):
IncrementalDecoder.__init__(self, errors)
self.buffer = "" # undecoded input that is kept between calls to decode()
def _buffer_decode(self, input, errors, final):
# Overwrite this method in subclasses: It must decode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def decode(self, input, final=False):
# decode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_decode(data, self.errors, final)
# keep undecoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalDecoder.reset(self)
self.buffer = ""
def getstate(self):
# additional state info is always 0
return (self.buffer, 0)
def setstate(self, state):
# ignore additional state info
self.buffer = state[0]
#
# The StreamWriter and StreamReader class provide generic working
# interfaces which can be used to implement new encoding submodules
# very easily. See encodings/utf_8.py for an example on how this is
# done.
#
class StreamWriter(Codec):
def __init__(self, stream, errors='strict'):
""" Creates a StreamWriter instance.
stream must be a file-like object open for writing
(binary) data.
The StreamWriter may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character
'xmlcharrefreplace' - Replace with the appropriate XML
character reference.
'backslashreplace' - Replace with backslashed escape
sequences (only for encoding).
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
def write(self, object):
""" Writes the object's contents encoded to self.stream.
"""
data, consumed = self.encode(object, self.errors)
self.stream.write(data)
def writelines(self, list):
""" Writes the concatenated list of strings to the stream
using .write().
"""
self.write(''.join(list))
def reset(self):
""" Flushes and resets the codec buffers used for keeping state.
Calling this method should ensure that the data on the
output is put into a clean state, that allows appending
of new fresh data without having to rescan the whole
stream to recover state.
"""
pass
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
if whence == 0 and offset == 0:
self.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamReader(Codec):
def __init__(self, stream, errors='strict'):
""" Creates a StreamReader instance.
stream must be a file-like object open for reading
(binary) data.
The StreamReader may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character;
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
self.bytebuffer = ""
# For str->str decoding this will stay a str
# For str->unicode decoding the first read will promote it to unicode
self.charbuffer = ""
self.linebuffer = None
def decode(self, input, errors='strict'):
raise NotImplementedError
def read(self, size=-1, chars=-1, firstline=False):
""" Decodes data from the stream self.stream and returns the
resulting object.
chars indicates the number of characters to read from the
stream. read() will never return more than chars
characters, but it might return less, if there are not enough
characters available.
size indicates the approximate maximum number of bytes to
read from the stream for decoding purposes. The decoder
can modify this setting as appropriate. The default value
-1 indicates to read and decode as much as possible. size
is intended to prevent having to decode huge files in one
step.
If firstline is true, and a UnicodeDecodeError happens
after the first line terminator in the input only the first line
will be returned, the rest of the input will be kept until the
next call to read().
The method should use a greedy read strategy meaning that
it should read as much data as is allowed within the
definition of the encoding and the given size, e.g. if
optional encoding endings or state markers are available
on the stream, these should be read too.
"""
# If we have lines cached, first merge them back into characters
if self.linebuffer:
self.charbuffer = "".join(self.linebuffer)
self.linebuffer = None
# read until we get the required number of characters (if available)
while True:
# can the request be satisfied from the character buffer?
if chars >= 0:
if len(self.charbuffer) >= chars:
break
elif size >= 0:
if len(self.charbuffer) >= size:
break
# we need more data
if size < 0:
newdata = self.stream.read()
else:
newdata = self.stream.read(size)
# decode bytes (those remaining from the last call included)
data = self.bytebuffer + newdata
try:
newchars, decodedbytes = self.decode(data, self.errors)
except UnicodeDecodeError, exc:
if firstline:
newchars, decodedbytes = self.decode(data[:exc.start], self.errors)
lines = newchars.splitlines(True)
if len(lines) <= 1:
raise
else:
raise
# keep undecoded bytes until the next call
self.bytebuffer = data[decodedbytes:]
# put new characters in the character buffer
self.charbuffer += newchars
# there was no data available
if not newdata:
break
if chars < 0:
# Return everything we've got
result = self.charbuffer
self.charbuffer = ""
else:
# Return the first chars characters
result = self.charbuffer[:chars]
self.charbuffer = self.charbuffer[chars:]
return result
def readline(self, size=None, keepends=True):
""" Read one line from the input stream and return the
decoded data.
size, if given, is passed as size argument to the
read() method.
"""
# If we have lines cached from an earlier read, return
# them unconditionally
if self.linebuffer:
line = self.linebuffer[0]
del self.linebuffer[0]
if len(self.linebuffer) == 1:
# revert to charbuffer mode; we might need more data
# next time
self.charbuffer = self.linebuffer[0]
self.linebuffer = None
if not keepends:
line = line.splitlines(False)[0]
return line
readsize = size or 72
line = ""
# If size is given, we call read() only once
while True:
data = self.read(readsize, firstline=True)
if data:
# If we're at a "\r" read one extra character (which might
# be a "\n") to get a proper line ending. If the stream is
# temporarily exhausted we return the wrong line ending.
if data.endswith("\r"):
data += self.read(size=1, chars=1)
line += data
lines = line.splitlines(True)
if lines:
if len(lines) > 1:
# More than one line result; the first line is a full line
# to return
line = lines[0]
del lines[0]
if len(lines) > 1:
# cache the remaining lines
lines[-1] += self.charbuffer
self.linebuffer = lines
self.charbuffer = None
else:
# only one remaining line, put it back into charbuffer
self.charbuffer = lines[0] + self.charbuffer
if not keepends:
line = line.splitlines(False)[0]
break
line0withend = lines[0]
line0withoutend = lines[0].splitlines(False)[0]
if line0withend != line0withoutend: # We really have a line end
# Put the rest back together and keep it until the next call
self.charbuffer = "".join(lines[1:]) + self.charbuffer
if keepends:
line = line0withend
else:
line = line0withoutend
break
# we didn't get anything or this was our only try
if not data or size is not None:
if line and not keepends:
line = line.splitlines(False)[0]
break
if readsize < 8000:
readsize *= 2
return line
def readlines(self, sizehint=None, keepends=True):
""" Read all lines available on the input stream
and return them as list of lines.
Line breaks are implemented using the codec's decoder
method and are included in the list entries.
sizehint, if given, is ignored since there is no efficient
way to finding the true end-of-line.
"""
data = self.read()
return data.splitlines(keepends)
def reset(self):
""" Resets the codec buffers used for keeping state.
Note that no stream repositioning should take place.
This method is primarily intended to be able to recover
from decoding errors.
"""
self.bytebuffer = ""
self.charbuffer = u""
self.linebuffer = None
def seek(self, offset, whence=0):
""" Set the input stream's current position.
Resets the codec buffers used for keeping state.
"""
self.stream.seek(offset, whence)
self.reset()
def next(self):
""" Return the next decoded line from the input stream."""
line = self.readline()
if line:
return line
raise StopIteration
def __iter__(self):
return self
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamReaderWriter:
""" StreamReaderWriter instances allow wrapping streams which
work in both read and write modes.
The design is such that one can use the factory functions
returned by the codec.lookup() function to construct the
instance.
"""
# Optional attributes set by the file wrappers below
encoding = 'unknown'
def __init__(self, stream, Reader, Writer, errors='strict'):
""" Creates a StreamReaderWriter instance.
stream must be a Stream-like object.
Reader, Writer must be factory functions or classes
providing the StreamReader, StreamWriter interface resp.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
return self.reader.read(size)
def readline(self, size=None):
return self.reader.readline(size)
def readlines(self, sizehint=None):
return self.reader.readlines(sizehint)
def next(self):
""" Return the next decoded line from the input stream."""
return self.reader.next()
def __iter__(self):
return self
def write(self, data):
return self.writer.write(data)
def writelines(self, list):
return self.writer.writelines(list)
def reset(self):
self.reader.reset()
self.writer.reset()
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
self.reader.reset()
if whence == 0 and offset == 0:
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
# these are needed to make "with codecs.open(...)" work properly
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamRecoder:
""" StreamRecoder instances provide a frontend - backend
view of encoding data.
They use the complete set of APIs returned by the
codecs.lookup() function to implement their task.
Data written to the stream is first decoded into an
intermediate format (which is dependent on the given codec
combination) and then written to the stream using an instance
of the provided Writer class.
In the other direction, data is read from the stream using a
Reader instance and then return encoded data to the caller.
"""
# Optional attributes set by the file wrappers below
data_encoding = 'unknown'
file_encoding = 'unknown'
def __init__(self, stream, encode, decode, Reader, Writer,
errors='strict'):
""" Creates a StreamRecoder instance which implements a two-way
conversion: encode and decode work on the frontend (the
input to .read() and output of .write()) while
Reader and Writer work on the backend (reading and
writing to the stream).
You can use these objects to do transparent direct
recodings from e.g. latin-1 to utf-8 and back.
stream must be a file-like object.
encode, decode must adhere to the Codec interface, Reader,
Writer must be factory functions or classes providing the
StreamReader, StreamWriter interface resp.
encode and decode are needed for the frontend translation,
Reader and Writer for the backend translation. Unicode is
used as intermediate encoding.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.encode = encode
self.decode = decode
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
data = self.reader.read(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readline(self, size=None):
if size is None:
data = self.reader.readline()
else:
data = self.reader.readline(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readlines(self, sizehint=None):
data = self.reader.read()
data, bytesencoded = self.encode(data, self.errors)
return data.splitlines(1)
def next(self):
""" Return the next decoded line from the input stream."""
data = self.reader.next()
data, bytesencoded = self.encode(data, self.errors)
return data
def __iter__(self):
return self
def write(self, data):
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def writelines(self, list):
data = ''.join(list)
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def reset(self):
self.reader.reset()
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
### Shortcuts
def open(filename, mode='rb', encoding=None, errors='strict', buffering=1):
""" Open an encoded file using the given mode and return
a wrapped version providing transparent encoding/decoding.
Note: The wrapped version will only accept the object format
defined by the codecs, i.e. Unicode objects for most builtin
codecs. Output is also codec dependent and will usually be
Unicode as well.
Files are always opened in binary mode, even if no binary mode
was specified. This is done to avoid data loss due to encodings
using 8-bit values. The default file mode is 'rb' meaning to
open the file in binary read mode.
encoding specifies the encoding which is to be used for the
file.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
buffering has the same meaning as for the builtin open() API.
It defaults to line buffered.
The returned wrapped file object provides an extra attribute
.encoding which allows querying the used encoding. This
attribute is only available if an encoding was specified as
parameter.
"""
if encoding is not None:
if 'U' in mode:
# No automatic conversion of '\n' is done on reading and writing
mode = mode.strip().replace('U', '')
if mode[:1] not in set('rwa'):
mode = 'r' + mode
if 'b' not in mode:
# Force opening of the file in binary mode
mode = mode + 'b'
file = __builtin__.open(filename, mode, buffering)
if encoding is None:
return file
info = lookup(encoding)
srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors)
# Add attributes to simplify introspection
srw.encoding = encoding
return srw
def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
""" Return a wrapped version of file which provides transparent
encoding translation.
Strings written to the wrapped file are interpreted according
to the given data_encoding and then written to the original
file as string using file_encoding. The intermediate encoding
will usually be Unicode but depends on the specified codecs.
Strings are read from the file using file_encoding and then
passed back to the caller as string using data_encoding.
If file_encoding is not given, it defaults to data_encoding.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
The returned wrapped file object provides two extra attributes
.data_encoding and .file_encoding which reflect the given
parameters of the same name. The attributes can be used for
introspection by Python programs.
"""
if file_encoding is None:
file_encoding = data_encoding
data_info = lookup(data_encoding)
file_info = lookup(file_encoding)
sr = StreamRecoder(file, data_info.encode, data_info.decode,
file_info.streamreader, file_info.streamwriter, errors)
# Add attributes to simplify introspection
sr.data_encoding = data_encoding
sr.file_encoding = file_encoding
return sr
### Helpers for codec lookup
def getencoder(encoding):
""" Lookup up the codec for the given encoding and return
its encoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).encode
def getdecoder(encoding):
""" Lookup up the codec for the given encoding and return
its decoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).decode
def getincrementalencoder(encoding):
""" Lookup up the codec for the given encoding and return
its IncrementalEncoder class or factory function.
Raises a LookupError in case the encoding cannot be found
or the codecs doesn't provide an incremental encoder.
"""
encoder = lookup(encoding).incrementalencoder
if encoder is None:
raise LookupError(encoding)
return encoder
def getincrementaldecoder(encoding):
""" Lookup up the codec for the given encoding and return
its IncrementalDecoder class or factory function.
Raises a LookupError in case the encoding cannot be found
or the codecs doesn't provide an incremental decoder.
"""
decoder = lookup(encoding).incrementaldecoder
if decoder is None:
raise LookupError(encoding)
return decoder
def getreader(encoding):
""" Lookup up the codec for the given encoding and return
its StreamReader class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamreader
def getwriter(encoding):
""" Lookup up the codec for the given encoding and return
its StreamWriter class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamwriter
def iterencode(iterator, encoding, errors='strict', **kwargs):
"""
Encoding iterator.
Encodes the input strings from the iterator using a IncrementalEncoder.
errors and kwargs are passed through to the IncrementalEncoder
constructor.
"""
encoder = getincrementalencoder(encoding)(errors, **kwargs)
for input in iterator:
output = encoder.encode(input)
if output:
yield output
output = encoder.encode("", True)
if output:
yield output
def iterdecode(iterator, encoding, errors='strict', **kwargs):
"""
Decoding iterator.
Decodes the input strings from the iterator using a IncrementalDecoder.
errors and kwargs are passed through to the IncrementalDecoder
constructor.
"""
decoder = getincrementaldecoder(encoding)(errors, **kwargs)
for input in iterator:
output = decoder.decode(input)
if output:
yield output
output = decoder.decode("", True)
if output:
yield output
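# Illustration only (not part of the original module): with the incremental
# machinery above,
#   list(iterencode([u"ab", u"cd"], "utf-8")) -> ['ab', 'cd']
#   list(iterdecode(['ab', 'cd'], "utf-8")) -> [u'ab', u'cd']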
### Helpers for charmap-based codecs
def make_identity_dict(rng):
""" make_identity_dict(rng) -> dict
Return a dictionary where elements of the rng sequence are
mapped to themselves.
"""
res = {}
for i in rng:
res[i] = i
return res
def make_encoding_map(decoding_map):
""" Creates an encoding map from a decoding map.
If a target mapping in the decoding map occurs multiple
times, then that target is mapped to None (undefined mapping),
causing an exception when encountered by the charmap codec
during translation.
One example where this happens is cp875.py which decodes
multiple character to \u001a.
"""
m = {}
for k, v in decoding_map.items():
if not v in m:
m[v] = k
else:
m[v] = None
return m
### error handlers
try:
strict_errors = lookup_error("strict")
ignore_errors = lookup_error("ignore")
replace_errors = lookup_error("replace")
xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace")
backslashreplace_errors = lookup_error("backslashreplace")
except LookupError:
# In --disable-unicode builds, these error handler are missing
strict_errors = None
ignore_errors = None
replace_errors = None
xmlcharrefreplace_errors = None
backslashreplace_errors = None
# Tell modulefinder that using codecs probably needs the encodings
# package
_false = 0
if _false:
import encodings
### Tests
if __name__ == '__main__':
# Make stdout translate Latin-1 output into UTF-8 output
sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')
# Have stdin translate Latin-1 input into UTF-8 input
sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
|
ppyordanov/HCI_4_Future_Cities
|
Server/src/virtualenv/Lib/codecs.py
|
Python
|
mit
| 35,196
|
[
"FEFF"
] |
ef6c908e742337b8bfd224f397fafd2e11e929df6a7a8f9b68ace8f13525180c
|
from ob_pipelines import s3
import pysam
import sys
@s3.s3args()
def dedupe_by_pos_strand(infile, outfile):
infile = pysam.AlignmentFile(infile, 'rb')
outfile = pysam.AlignmentFile(outfile, 'wb', template=infile)
pos_list = set()
for aln in infile:
pos_strand = (aln.reference_start, aln.is_reverse)
if aln.reference_start == -1:
continue
if pos_strand in pos_list:
continue
outfile.write(aln)
pos_list.add(pos_strand)
if __name__ == '__main__':
if len(sys.argv) != 3:
print('usage: {} INFILE OUTFILE'.format(sys.argv[0]))
sys.exit(0)
_, infile, outfile = sys.argv
dedupe_by_pos_strand(infile, outfile)
|
outlierbio/ob-pipelines
|
ob_pipelines/apps/pysam/dedupe.py
|
Python
|
apache-2.0
| 721
|
[
"pysam"
] |
5bbb87827a63c959f81537891006c64a342827a58ef7ab3a2b0f90a2b54e5d4f
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for Chromium.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import re
import subprocess
import sys
_EXCLUDED_PATHS = (
r"^breakpad[\\\/].*",
r"^native_client_sdk[\\\/]src[\\\/]build_tools[\\\/]make_rules.py",
r"^native_client_sdk[\\\/]src[\\\/]build_tools[\\\/]make_simple.py",
r"^native_client_sdk[\\\/]src[\\\/]tools[\\\/].*.mk",
r"^net[\\\/]tools[\\\/]spdyshark[\\\/].*",
r"^skia[\\\/].*",
r"^v8[\\\/].*",
r".*MakeFile$",
r".+_autogen\.h$",
r".+[\\\/]pnacl_shim\.c$",
)
# Fragment of a regular expression that matches C++ and Objective-C++
# implementation files.
_IMPLEMENTATION_EXTENSIONS = r'\.(cc|cpp|cxx|mm)$'
# Regular expression that matches code only used for test binaries
# (best effort).
_TEST_CODE_EXCLUDED_PATHS = (
r'.*[/\\](fake_|test_|mock_).+%s' % _IMPLEMENTATION_EXTENSIONS,
r'.+_test_(base|support|util)%s' % _IMPLEMENTATION_EXTENSIONS,
r'.+_(api|browser|perf|pixel|unit|ui)?test(_[a-z]+)?%s' %
_IMPLEMENTATION_EXTENSIONS,
r'.+profile_sync_service_harness%s' % _IMPLEMENTATION_EXTENSIONS,
r'.*[/\\](test|tool(s)?)[/\\].*',
# content_shell is used for running layout tests.
r'content[/\\]shell[/\\].*',
# At request of folks maintaining this folder.
r'chrome[/\\]browser[/\\]automation[/\\].*',
)
_TEST_ONLY_WARNING = (
'You might be calling functions intended only for testing from\n'
'production code. It is OK to ignore this warning if you know what\n'
'you are doing, as the heuristics used to detect the situation are\n'
'not perfect. The commit queue will not block on this warning.\n'
'Email joi@chromium.org if you have questions.')
_INCLUDE_ORDER_WARNING = (
'Your #include order seems to be broken. Send mail to\n'
'marja@chromium.org if this is not the case.')
_BANNED_OBJC_FUNCTIONS = (
(
'addTrackingRect:',
(
'The use of -[NSView addTrackingRect:owner:userData:assumeInside:] is'
'prohibited. Please use CrTrackingArea instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
False,
),
(
'NSTrackingArea',
(
'The use of NSTrackingAreas is prohibited. Please use CrTrackingArea',
'instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
False,
),
(
'convertPointFromBase:',
(
'The use of -[NSView convertPointFromBase:] is almost certainly wrong.',
'Please use |convertPoint:(point) fromView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
(
'convertPointToBase:',
(
'The use of -[NSView convertPointToBase:] is almost certainly wrong.',
'Please use |convertPoint:(point) toView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
(
'convertRectFromBase:',
(
'The use of -[NSView convertRectFromBase:] is almost certainly wrong.',
'Please use |convertRect:(point) fromView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
(
'convertRectToBase:',
(
'The use of -[NSView convertRectToBase:] is almost certainly wrong.',
'Please use |convertRect:(point) toView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
(
'convertSizeFromBase:',
(
'The use of -[NSView convertSizeFromBase:] is almost certainly wrong.',
'Please use |convertSize:(point) fromView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
(
'convertSizeToBase:',
(
'The use of -[NSView convertSizeToBase:] is almost certainly wrong.',
'Please use |convertSize:(point) toView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
)
_BANNED_CPP_FUNCTIONS = (
# Make sure that gtest's FRIEND_TEST() macro is not used; the
# FRIEND_TEST_ALL_PREFIXES() macro from base/gtest_prod_util.h should be
# used instead since that allows for FLAKY_ and DISABLED_ prefixes.
(
'FRIEND_TEST(',
(
'Chromium code should not use gtest\'s FRIEND_TEST() macro. Include',
'base/gtest_prod_util.h and use FRIEND_TEST_ALL_PREFIXES() instead.',
),
False,
(),
),
(
'ScopedAllowIO',
(
'New code should not use ScopedAllowIO. Post a task to the blocking',
'pool or the FILE thread instead.',
),
True,
(
r"^content[\\\/]shell[\\\/]shell_browser_main\.cc$",
r"^net[\\\/]disk_cache[\\\/]cache_util\.cc$",
),
),
(
'SkRefPtr',
(
'The use of SkRefPtr is prohibited. ',
'Please use skia::RefPtr instead.'
),
True,
(),
),
(
'SkAutoRef',
(
'The indirect use of SkRefPtr via SkAutoRef is prohibited. ',
'Please use skia::RefPtr instead.'
),
True,
(),
),
(
'SkAutoTUnref',
(
'The use of SkAutoTUnref is dangerous because it implicitly ',
'converts to a raw pointer. Please use skia::RefPtr instead.'
),
True,
(),
),
(
'SkAutoUnref',
(
'The indirect use of SkAutoTUnref through SkAutoUnref is dangerous ',
'because it implicitly converts to a raw pointer. ',
'Please use skia::RefPtr instead.'
),
True,
(),
),
)
_VALID_OS_MACROS = (
# Please keep sorted.
'OS_ANDROID',
'OS_BSD',
'OS_CAT', # For testing.
'OS_CHROMEOS',
'OS_FREEBSD',
'OS_IOS',
'OS_LINUX',
'OS_MACOSX',
'OS_NACL',
'OS_OPENBSD',
'OS_POSIX',
'OS_SOLARIS',
'OS_WIN',
)
def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
"""Attempts to prevent use of functions intended only for testing in
non-testing code. For now this is just a best-effort implementation
that ignores header files and may have some false positives. A
better implementation would probably need a proper C++ parser.
"""
# We only scan .cc files and the like, as the declaration of
# for-testing functions in header files are hard to distinguish from
# calls to such functions without a proper C++ parser.
file_inclusion_pattern = r'.+%s' % _IMPLEMENTATION_EXTENSIONS
base_function_pattern = r'ForTest(ing)?|for_test(ing)?'
inclusion_pattern = input_api.re.compile(r'(%s)\s*\(' % base_function_pattern)
exclusion_pattern = input_api.re.compile(
r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % (
base_function_pattern, base_function_pattern))
def FilterFile(affected_file):
black_list = (_EXCLUDED_PATHS +
_TEST_CODE_EXCLUDED_PATHS +
input_api.DEFAULT_BLACK_LIST)
return input_api.FilterSourceFile(
affected_file,
white_list=(file_inclusion_pattern, ),
black_list=black_list)
problems = []
for f in input_api.AffectedSourceFiles(FilterFile):
local_path = f.LocalPath()
lines = input_api.ReadFile(f).splitlines()
line_number = 0
for line in lines:
if (inclusion_pattern.search(line) and
not exclusion_pattern.search(line)):
problems.append(
'%s:%d\n %s' % (local_path, line_number, line.strip()))
line_number += 1
if problems:
return [output_api.PresubmitPromptOrNotify(_TEST_ONLY_WARNING, problems)]
else:
return []
def _CheckNoIOStreamInHeaders(input_api, output_api):
"""Checks to make sure no .h files include <iostream>."""
files = []
pattern = input_api.re.compile(r'^#include\s*<iostream>',
input_api.re.MULTILINE)
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if not f.LocalPath().endswith('.h'):
continue
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
if len(files):
return [ output_api.PresubmitError(
'Do not #include <iostream> in header files, since it inserts static '
'initialization into every file including the header. Instead, '
'#include <ostream>. See http://crbug.com/94794',
files) ]
return []
def _CheckNoUNIT_TESTInSourceFiles(input_api, output_api):
"""Checks to make sure no source files use UNIT_TEST"""
problems = []
for f in input_api.AffectedFiles():
if (not f.LocalPath().endswith(('.cc', '.mm'))):
continue
for line_num, line in f.ChangedContents():
if 'UNIT_TEST' in line:
problems.append(' %s:%d' % (f.LocalPath(), line_num))
if not problems:
return []
return [output_api.PresubmitPromptWarning('UNIT_TEST is only for headers.\n' +
'\n'.join(problems))]
def _CheckNoNewWStrings(input_api, output_api):
"""Checks to make sure we don't introduce use of wstrings."""
problems = []
for f in input_api.AffectedFiles():
if (not f.LocalPath().endswith(('.cc', '.h')) or
f.LocalPath().endswith('test.cc')):
continue
allowWString = False
for line_num, line in f.ChangedContents():
if 'presubmit: allow wstring' in line:
allowWString = True
elif not allowWString and 'wstring' in line:
problems.append(' %s:%d' % (f.LocalPath(), line_num))
allowWString = False
else:
allowWString = False
if not problems:
return []
return [output_api.PresubmitPromptWarning('New code should not use wstrings.'
' If you are calling a cross-platform API that accepts a wstring, '
'fix the API.\n' +
'\n'.join(problems))]
def _CheckNoDEPSGIT(input_api, output_api):
"""Make sure .DEPS.git is never modified manually."""
if any(f.LocalPath().endswith('.DEPS.git') for f in
input_api.AffectedFiles()):
return [output_api.PresubmitError(
'Never commit changes to .DEPS.git. This file is maintained by an\n'
'automated system based on what\'s in DEPS and your changes will be\n'
'overwritten.\n'
'See http://code.google.com/p/chromium/wiki/UsingNewGit#Rolling_DEPS\n'
'for more information')]
return []
def _CheckNoBannedFunctions(input_api, output_api):
"""Make sure that banned functions are not used."""
warnings = []
errors = []
file_filter = lambda f: f.LocalPath().endswith(('.mm', '.m', '.h'))
for f in input_api.AffectedFiles(file_filter=file_filter):
for line_num, line in f.ChangedContents():
for func_name, message, error in _BANNED_OBJC_FUNCTIONS:
if func_name in line:
problems = warnings;
if error:
problems = errors;
problems.append(' %s:%d:' % (f.LocalPath(), line_num))
for message_line in message:
problems.append(' %s' % message_line)
file_filter = lambda f: f.LocalPath().endswith(('.cc', '.mm', '.h'))
for f in input_api.AffectedFiles(file_filter=file_filter):
for line_num, line in f.ChangedContents():
for func_name, message, error, excluded_paths in _BANNED_CPP_FUNCTIONS:
def IsBlacklisted(affected_file, blacklist):
local_path = affected_file.LocalPath()
for item in blacklist:
if input_api.re.match(item, local_path):
return True
return False
if IsBlacklisted(f, excluded_paths):
continue
if func_name in line:
problems = warnings;
if error:
problems = errors;
problems.append(' %s:%d:' % (f.LocalPath(), line_num))
for message_line in message:
problems.append(' %s' % message_line)
result = []
if (warnings):
result.append(output_api.PresubmitPromptWarning(
'Banned functions were used.\n' + '\n'.join(warnings)))
if (errors):
result.append(output_api.PresubmitError(
'Banned functions were used.\n' + '\n'.join(errors)))
return result
def _CheckNoPragmaOnce(input_api, output_api):
"""Make sure that banned functions are not used."""
files = []
pattern = input_api.re.compile(r'^#pragma\s+once',
input_api.re.MULTILINE)
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if not f.LocalPath().endswith('.h'):
continue
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
if files:
return [output_api.PresubmitError(
'Do not use #pragma once in header files.\n'
'See http://www.chromium.org/developers/coding-style#TOC-File-headers',
files)]
return []
def _CheckNoTrinaryTrueFalse(input_api, output_api):
"""Checks to make sure we don't introduce use of foo ? true : false."""
problems = []
pattern = input_api.re.compile(r'\?\s*(true|false)\s*:\s*(true|false)')
for f in input_api.AffectedFiles():
if not f.LocalPath().endswith(('.cc', '.h', '.inl', '.m', '.mm')):
continue
for line_num, line in f.ChangedContents():
if pattern.match(line):
problems.append(' %s:%d' % (f.LocalPath(), line_num))
if not problems:
return []
return [output_api.PresubmitPromptWarning(
'Please consider avoiding the "? true : false" pattern if possible.\n' +
'\n'.join(problems))]
def _CheckUnwantedDependencies(input_api, output_api):
"""Runs checkdeps on #include statements added in this
change. Breaking - rules is an error, breaking ! rules is a
warning.
"""
# We need to wait until we have an input_api object and use this
# roundabout construct to import checkdeps because this file is
# eval-ed and thus doesn't have __file__.
original_sys_path = sys.path
try:
sys.path = sys.path + [input_api.os_path.join(
input_api.PresubmitLocalPath(), 'tools', 'checkdeps')]
import checkdeps
from cpp_checker import CppChecker
from rules import Rule
finally:
# Restore sys.path to what it was before.
sys.path = original_sys_path
added_includes = []
for f in input_api.AffectedFiles():
if not CppChecker.IsCppFile(f.LocalPath()):
continue
changed_lines = [line for line_num, line in f.ChangedContents()]
added_includes.append([f.LocalPath(), changed_lines])
deps_checker = checkdeps.DepsChecker(input_api.PresubmitLocalPath())
error_descriptions = []
warning_descriptions = []
for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes(
added_includes):
description_with_path = '%s\n %s' % (path, rule_description)
if rule_type == Rule.DISALLOW:
error_descriptions.append(description_with_path)
else:
warning_descriptions.append(description_with_path)
results = []
if error_descriptions:
results.append(output_api.PresubmitError(
'You added one or more #includes that violate checkdeps rules.',
error_descriptions))
if warning_descriptions:
results.append(output_api.PresubmitPromptOrNotify(
'You added one or more #includes of files that are temporarily\n'
'allowed but being removed. Can you avoid introducing the\n'
'#include? See relevant DEPS file(s) for details and contacts.',
warning_descriptions))
return results
def _CheckFilePermissions(input_api, output_api):
"""Check that all files have their permissions properly set."""
args = [sys.executable, 'tools/checkperms/checkperms.py', '--root',
input_api.change.RepositoryRoot()]
for f in input_api.AffectedFiles():
args += ['--file', f.LocalPath()]
# Capture stdout so that errors reported by checkperms.py are surfaced.
(errors, stderrdata) = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()
results = []
if errors:
results.append(output_api.PresubmitError('checkperms.py failed.',
errors))
return results
def _CheckNoAuraWindowPropertyHInHeaders(input_api, output_api):
"""Makes sure we don't include ui/aura/window_property.h
in header files.
"""
pattern = input_api.re.compile(r'^#include\s*"ui/aura/window_property.h"')
errors = []
for f in input_api.AffectedFiles():
if not f.LocalPath().endswith('.h'):
continue
for line_num, line in f.ChangedContents():
if pattern.match(line):
errors.append(' %s:%d' % (f.LocalPath(), line_num))
results = []
if errors:
results.append(output_api.PresubmitError(
'Header files should not include ui/aura/window_property.h', errors))
return results
def _CheckIncludeOrderForScope(scope, input_api, file_path, changed_linenums):
"""Checks that the lines in scope occur in the right order.
1. C system files in alphabetical order
2. C++ system files in alphabetical order
3. Project's .h files
"""
c_system_include_pattern = input_api.re.compile(r'\s*#include <.*\.h>')
cpp_system_include_pattern = input_api.re.compile(r'\s*#include <.*>')
custom_include_pattern = input_api.re.compile(r'\s*#include ".*')
C_SYSTEM_INCLUDES, CPP_SYSTEM_INCLUDES, CUSTOM_INCLUDES = range(3)
state = C_SYSTEM_INCLUDES
previous_line = ''
previous_line_num = 0
problem_linenums = []
for line_num, line in scope:
if c_system_include_pattern.match(line):
if state != C_SYSTEM_INCLUDES:
problem_linenums.append((line_num, previous_line_num))
elif previous_line and previous_line > line:
problem_linenums.append((line_num, previous_line_num))
elif cpp_system_include_pattern.match(line):
if state == C_SYSTEM_INCLUDES:
state = CPP_SYSTEM_INCLUDES
elif state == CUSTOM_INCLUDES:
problem_linenums.append((line_num, previous_line_num))
elif previous_line and previous_line > line:
problem_linenums.append((line_num, previous_line_num))
elif custom_include_pattern.match(line):
if state != CUSTOM_INCLUDES:
state = CUSTOM_INCLUDES
elif previous_line and previous_line > line:
problem_linenums.append((line_num, previous_line_num))
else:
problem_linenums.append(line_num)
previous_line = line
previous_line_num = line_num
warnings = []
for (line_num, previous_line_num) in problem_linenums:
if line_num in changed_linenums or previous_line_num in changed_linenums:
warnings.append(' %s:%d' % (file_path, line_num))
return warnings
def _CheckIncludeOrderInFile(input_api, f, changed_linenums):
"""Checks the #include order for the given file f."""
system_include_pattern = input_api.re.compile(r'\s*#include \<.*')
# Exclude #include <.../...> includes from the check; e.g., <sys/...> includes
# often need to appear in a specific order.
excluded_include_pattern = input_api.re.compile(r'\s*#include \<.*/.*')
custom_include_pattern = input_api.re.compile(r'\s*#include "(?P<FILE>.*)"')
if_pattern = input_api.re.compile(
r'\s*#\s*(if|elif|else|endif|define|undef).*')
# Some files need specialized order of includes; exclude such files from this
# check.
uncheckable_includes_pattern = input_api.re.compile(
r'\s*#include '
'("ipc/.*macros\.h"|<windows\.h>|".*gl.*autogen.h")\s*')
contents = f.NewContents()
warnings = []
line_num = 0
# Handle the special first include. If the first include file is
# some/path/file.h, the corresponding including file can be some/path/file.cc,
# some/other/path/file.cc, some/path/file_platform.cc, some/path/file-suffix.h
# etc. It's also possible that no special first include exists.
for line in contents:
line_num += 1
if system_include_pattern.match(line):
# No special first include -> process the line again along with normal
# includes.
line_num -= 1
break
match = custom_include_pattern.match(line)
if match:
match_dict = match.groupdict()
header_basename = input_api.os_path.basename(
match_dict['FILE']).replace('.h', '')
if header_basename not in input_api.os_path.basename(f.LocalPath()):
# No special first include -> process the line again along with normal
# includes.
line_num -= 1
break
# Split into scopes: Each region between #if and #endif is its own scope.
scopes = []
current_scope = []
for line in contents[line_num:]:
line_num += 1
if uncheckable_includes_pattern.match(line):
return []
if if_pattern.match(line):
scopes.append(current_scope)
current_scope = []
elif ((system_include_pattern.match(line) or
custom_include_pattern.match(line)) and
not excluded_include_pattern.match(line)):
current_scope.append((line_num, line))
scopes.append(current_scope)
for scope in scopes:
warnings.extend(_CheckIncludeOrderForScope(scope, input_api, f.LocalPath(),
changed_linenums))
return warnings
def _CheckIncludeOrder(input_api, output_api):
"""Checks that the #include order is correct.
1. The corresponding header for source files.
2. C system files in alphabetical order
3. C++ system files in alphabetical order
4. Project's .h files in alphabetical order
Each region separated by #if, #elif, #else, #endif, #define and #undef follows
these rules separately.
"""
warnings = []
for f in input_api.AffectedFiles():
if f.LocalPath().endswith(('.cc', '.h')):
changed_linenums = set(line_num for line_num, _ in f.ChangedContents())
warnings.extend(_CheckIncludeOrderInFile(input_api, f, changed_linenums))
results = []
if warnings:
results.append(output_api.PresubmitPromptOrNotify(_INCLUDE_ORDER_WARNING,
warnings))
return results
def _CheckForVersionControlConflictsInFile(input_api, f):
pattern = input_api.re.compile('^(?:<<<<<<<|>>>>>>>) |^=======$')
errors = []
for line_num, line in f.ChangedContents():
if pattern.match(line):
errors.append(' %s:%d %s' % (f.LocalPath(), line_num, line))
return errors
def _CheckForVersionControlConflicts(input_api, output_api):
"""Usually this is not intentional and will cause a compile failure."""
errors = []
for f in input_api.AffectedFiles():
errors.extend(_CheckForVersionControlConflictsInFile(input_api, f))
results = []
if errors:
results.append(output_api.PresubmitError(
'Version control conflict markers found, please resolve.', errors))
return results
def _CheckHardcodedGoogleHostsInLowerLayers(input_api, output_api):
def FilterFile(affected_file):
"""Filter function for use with input_api.AffectedSourceFiles,
below. This filters out everything except non-test files from
top-level directories that generally speaking should not hard-code
service URLs (e.g. src/android_webview/, src/content/ and others).
"""
return input_api.FilterSourceFile(
affected_file,
white_list=(r'^(android_webview|base|content|net)[\\\/].*', ),
black_list=(_EXCLUDED_PATHS +
_TEST_CODE_EXCLUDED_PATHS +
input_api.DEFAULT_BLACK_LIST))
pattern = input_api.re.compile('"[^"]*google\.com[^"]*"')
problems = [] # items are (filename, line_number, line)
for f in input_api.AffectedSourceFiles(FilterFile):
for line_num, line in f.ChangedContents():
if pattern.search(line):
problems.append((f.LocalPath(), line_num, line))
if problems:
return [output_api.PresubmitPromptOrNotify(
'Most layers below src/chrome/ should not hardcode service URLs.\n'
'Are you sure this is correct? (Contact: joi@chromium.org)',
[' %s:%d: %s' % (
problem[0], problem[1], problem[2]) for problem in problems])]
else:
return []
def _CheckNoAbbreviationInPngFileName(input_api, output_api):
"""Makes sure there are no abbreviations in the name of PNG files.
"""
pattern = input_api.re.compile(r'.*_[a-z]_.*\.png$|.*_[a-z]\.png$')
errors = []
for f in input_api.AffectedFiles(include_deletes=False):
if pattern.match(f.LocalPath()):
errors.append(' %s' % f.LocalPath())
results = []
if errors:
results.append(output_api.PresubmitError(
'The name of PNG files should not have abbreviations. \n'
'Use _hover.png, _center.png, instead of _h.png, _c.png.\n'
'Contact oshima@chromium.org if you have questions.', errors))
return results
def _CheckAddedDepsHaveTargetApprovals(input_api, output_api):
"""When a dependency prefixed with + is added to a DEPS file, we
want to make sure that the change is reviewed by an OWNER of the
target file or directory, to avoid layering violations from being
introduced. This check verifies that this happens.
"""
changed_lines = set()
for f in input_api.AffectedFiles():
filename = input_api.os_path.basename(f.LocalPath())
if filename == 'DEPS':
changed_lines |= set(line.strip()
for line_num, line
in f.ChangedContents())
if not changed_lines:
return []
virtual_depended_on_files = set()
# This pattern grabs the path without basename in the first
# parentheses, and the basename (if present) in the second. It
# relies on the simple heuristic that if there is a basename it will
# be a header file ending in ".h".
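# For example (an illustrative, made-up DEPS line, not taken from this change):
#   "+chrome/browser/ui",
# maps to the virtual dependency file chrome/browser/ui/DEPS.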
pattern = input_api.re.compile(
r"""['"]\+([^'"]+?)(/[a-zA-Z0-9_]+\.h)?['"].*""")
for changed_line in changed_lines:
m = pattern.match(changed_line)
if m:
virtual_depended_on_files.add('%s/DEPS' % m.group(1))
if not virtual_depended_on_files:
return []
if input_api.is_committing:
if input_api.tbr:
return [output_api.PresubmitNotifyResult(
'--tbr was specified, skipping OWNERS check for DEPS additions')]
if not input_api.change.issue:
return [output_api.PresubmitError(
"DEPS approval by OWNERS check failed: this change has "
"no Rietveld issue number, so we can't check it for approvals.")]
output = output_api.PresubmitError
else:
output = output_api.PresubmitNotifyResult
owners_db = input_api.owners_db
owner_email, reviewers = input_api.canned_checks._RietveldOwnerAndReviewers(
input_api,
owners_db.email_regexp,
approval_needed=input_api.is_committing)
owner_email = owner_email or input_api.change.author_email
reviewers_plus_owner = set([owner_email]).union(reviewers)
missing_files = owners_db.files_not_covered_by(virtual_depended_on_files,
reviewers_plus_owner)
unapproved_dependencies = ["'+%s'," % path[:-len('/DEPS')]
for path in missing_files]
if unapproved_dependencies:
output_list = [
output('Missing LGTM from OWNERS of directories added to DEPS:\n %s' %
'\n '.join(sorted(unapproved_dependencies)))]
if not input_api.is_committing:
suggested_owners = owners_db.reviewers_for(missing_files, owner_email)
output_list.append(output(
'Suggested missing target path OWNERS:\n %s' %
'\n '.join(suggested_owners or [])))
return output_list
return []
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
results.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, excluded_paths=_EXCLUDED_PATHS))
results.extend(_CheckAuthorizedAuthor(input_api, output_api))
results.extend(
_CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api))
results.extend(_CheckNoIOStreamInHeaders(input_api, output_api))
results.extend(_CheckNoUNIT_TESTInSourceFiles(input_api, output_api))
results.extend(_CheckNoNewWStrings(input_api, output_api))
results.extend(_CheckNoDEPSGIT(input_api, output_api))
results.extend(_CheckNoBannedFunctions(input_api, output_api))
results.extend(_CheckNoPragmaOnce(input_api, output_api))
results.extend(_CheckNoTrinaryTrueFalse(input_api, output_api))
results.extend(_CheckUnwantedDependencies(input_api, output_api))
results.extend(_CheckFilePermissions(input_api, output_api))
results.extend(_CheckNoAuraWindowPropertyHInHeaders(input_api, output_api))
results.extend(_CheckIncludeOrder(input_api, output_api))
results.extend(_CheckForVersionControlConflicts(input_api, output_api))
results.extend(_CheckPatchFiles(input_api, output_api))
results.extend(_CheckHardcodedGoogleHostsInLowerLayers(input_api, output_api))
results.extend(_CheckNoAbbreviationInPngFileName(input_api, output_api))
results.extend(_CheckForInvalidOSMacros(input_api, output_api))
results.extend(_CheckAddedDepsHaveTargetApprovals(input_api, output_api))
if any('PRESUBMIT.py' == f.LocalPath() for f in input_api.AffectedFiles()):
results.extend(input_api.canned_checks.RunUnitTestsInDirectory(
input_api, output_api,
input_api.PresubmitLocalPath(),
whitelist=[r'^PRESUBMIT_test\.py$']))
return results
def _CheckSubversionConfig(input_api, output_api):
"""Verifies the subversion config file is correctly setup.
Checks that autoprops are enabled, returns an error otherwise.
"""
join = input_api.os_path.join
if input_api.platform == 'win32':
appdata = input_api.environ.get('APPDATA', '')
if not appdata:
return [output_api.PresubmitError('%APPDATA% is not configured.')]
path = join(appdata, 'Subversion', 'config')
else:
home = input_api.environ.get('HOME', '')
if not home:
return [output_api.PresubmitError('$HOME is not configured.')]
path = join(home, '.subversion', 'config')
error_msg = (
'Please look at http://dev.chromium.org/developers/coding-style to\n'
'configure your subversion configuration file. This enables automatic\n'
'properties to simplify the project maintenance.\n'
'Pro-tip: just download and install\n'
'http://src.chromium.org/viewvc/chrome/trunk/tools/build/slave/config\n')
try:
lines = open(path, 'r').read().splitlines()
# Make sure auto-props is enabled and check for 2 Chromium standard
# auto-prop.
if (not '*.cc = svn:eol-style=LF' in lines or
not '*.pdf = svn:mime-type=application/pdf' in lines or
not 'enable-auto-props = yes' in lines):
return [
output_api.PresubmitNotifyResult(
'It looks like you have not configured your subversion config '
'file or it is not up-to-date.\n' + error_msg)
]
except (OSError, IOError):
return [
output_api.PresubmitNotifyResult(
'Can\'t find your subversion config file.\n' + error_msg)
]
return []
def _CheckAuthorizedAuthor(input_api, output_api):
"""For non-googler/chromites committers, verify the author's email address is
in AUTHORS.
"""
# TODO(maruel): Add it to input_api?
import fnmatch
author = input_api.change.author_email
if not author:
input_api.logging.info('No author, skipping AUTHOR check')
return []
authors_path = input_api.os_path.join(
input_api.PresubmitLocalPath(), 'AUTHORS')
valid_authors = (
input_api.re.match(r'[^#]+\s+\<(.+?)\>\s*$', line)
for line in open(authors_path))
valid_authors = [item.group(1).lower() for item in valid_authors if item]
if not any(fnmatch.fnmatch(author.lower(), valid) for valid in valid_authors):
input_api.logging.info('Valid authors are %s', ', '.join(valid_authors))
return [output_api.PresubmitPromptWarning(
('%s is not in AUTHORS file. If you are a new contributor, please visit'
'\n'
'http://www.chromium.org/developers/contributing-code and read the '
'"Legal" section\n'
'If you are a chromite, verify the contributor signed the CLA.') %
author)]
return []
def _CheckPatchFiles(input_api, output_api):
problems = [f.LocalPath() for f in input_api.AffectedFiles()
if f.LocalPath().endswith(('.orig', '.rej'))]
if problems:
return [output_api.PresubmitError(
"Don't commit .rej and .orig files.", problems)]
else:
return []
def _DidYouMeanOSMacro(bad_macro):
try:
return {'A': 'OS_ANDROID',
'B': 'OS_BSD',
'C': 'OS_CHROMEOS',
'F': 'OS_FREEBSD',
'L': 'OS_LINUX',
'M': 'OS_MACOSX',
'N': 'OS_NACL',
'O': 'OS_OPENBSD',
'P': 'OS_POSIX',
'S': 'OS_SOLARIS',
'W': 'OS_WIN'}[bad_macro[3].upper()]
except KeyError:
return ''
def _CheckForInvalidOSMacrosInFile(input_api, f):
"""Check for sensible looking, totally invalid OS macros."""
preprocessor_statement = input_api.re.compile(r'^\s*#')
os_macro = input_api.re.compile(r'defined\((OS_[^)]+)\)')
results = []
for lnum, line in f.ChangedContents():
if preprocessor_statement.search(line):
for match in os_macro.finditer(line):
if not match.group(1) in _VALID_OS_MACROS:
good = _DidYouMeanOSMacro(match.group(1))
did_you_mean = ' (did you mean %s?)' % good if good else ''
results.append(' %s:%d %s%s' % (f.LocalPath(),
lnum,
match.group(1),
did_you_mean))
return results
def _CheckForInvalidOSMacros(input_api, output_api):
"""Check all affected files for invalid OS macros."""
bad_macros = []
for f in input_api.AffectedFiles():
if not f.LocalPath().endswith(('.py', '.js', '.html', '.css')):
bad_macros.extend(_CheckForInvalidOSMacrosInFile(input_api, f))
if not bad_macros:
return []
return [output_api.PresubmitError(
'Possibly invalid OS macro[s] found. Please fix your code\n'
'or add your macro to src/PRESUBMIT.py.', bad_macros)]
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
# TODO(thestig) temporarily disabled, doesn't work in third_party/
#results.extend(input_api.canned_checks.CheckSvnModifiedDirectories(
# input_api, output_api, sources))
# Make sure the tree is 'open'.
results.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api,
output_api,
json_url='http://chromium-status.appspot.com/current?format=json'))
results.extend(input_api.canned_checks.CheckRietveldTryJobExecution(input_api,
output_api, 'http://codereview.chromium.org',
('win_rel', 'linux_rel', 'mac_rel', 'win:compile'),
'tryserver@chromium.org'))
results.extend(input_api.canned_checks.CheckChangeHasBugField(
input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
results.extend(_CheckSubversionConfig(input_api, output_api))
return results
def GetPreferredTrySlaves(project, change):
files = change.LocalPaths()
if not files or all(re.search(r'[\\/]OWNERS$', f) for f in files):
return []
if all(re.search('\.(m|mm)$|(^|[/_])mac[/_.]', f) for f in files):
return ['mac_rel', 'mac_asan', 'mac:compile']
if all(re.search('(^|[/_])win[/_.]', f) for f in files):
return ['win_rel', 'win7_aura', 'win:compile']
if all(re.search('(^|[/_])android[/_.]', f) for f in files):
return ['android_dbg', 'android_clang_dbg']
if all(re.search('^native_client_sdk', f) for f in files):
return ['linux_nacl_sdk', 'win_nacl_sdk', 'mac_nacl_sdk']
if all(re.search('[/_]ios[/_.]', f) for f in files):
return ['ios_rel_device', 'ios_dbg_simulator']
trybots = [
'android_clang_dbg',
'android_dbg',
'ios_dbg_simulator',
'ios_rel_device',
'linux_asan',
'linux_aura',
'linux_chromeos',
'linux_clang:compile',
'linux_rel',
'mac_asan',
'mac_rel',
'mac:compile',
'win7_aura',
'win_rel',
'win:compile',
]
# Match things like path/aura/file.cc and path/file_aura.cc.
# Same for chromeos.
if any(re.search('[/_](aura|chromeos)', f) for f in files):
trybots += ['linux_chromeos_clang:compile', 'linux_chromeos_asan']
return trybots
|
loopCM/chromium
|
PRESUBMIT.py
|
Python
|
bsd-3-clause
| 36,636
|
[
"VisIt"
] |
153987a4065ddebfc8fb74d7b15359e199b3b8474e711a72eed12b4b61cc92cf
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""OpenMM Reporter for saving the state of a molecular dynamics simulation
through time in the CHARMM / NAMD DCD format
"""
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
from mdtraj.formats.dcd import DCDTrajectoryFile
from mdtraj.reporters.basereporter import _BaseReporter
##############################################################################
# Classes
##############################################################################
class DCDReporter(_BaseReporter):
"""DCDReporter stores a molecular dynamics trajectory in the CHARMM / NAMD
DCD Format
Parameters
----------
file : str, or DCDTrajectoryFile
Either an open DCDTrajectoryFile object to write to, or a string
specifying the filename of a new DCD file to save the trajectory to.
reportInterval : int
The interval (in time steps) at which to write frames.
atomSubset : array_like, default=None
Only write a subset of the atoms, with these (zero based) indices
to the file. If None, *all* of the atoms will be written to disk.
Examples
--------
>>> simulation = Simulation(topology, system, integrator)
>>> dcd_reporter = DCDReporter('traj.dcd', 100)
>>> simulation.reporters.append(dcd_reporter)
>>> simulation.step(10000)
>>> traj = mdtraj.trajectory.load('traj.dcd')
"""
@property
def backend(self):
return DCDTrajectoryFile
def __init__(self, file, reportInterval, atomSubset=None):
super(DCDReporter, self).__init__(file, reportInterval,
coordinates=True, time=False, cell=True, potentialEnergy=False,
kineticEnergy=False, temperature=False, velocities=False,
atomSubset=atomSubset)
|
tcmoore3/mdtraj
|
mdtraj/reporters/dcdreporter.py
|
Python
|
lgpl-2.1
| 2,912
|
[
"Amber",
"CHARMM",
"MDTraj",
"NAMD",
"NetCDF",
"OpenMM"
] |
097e7ed8201c197fc5d191871690265790a0af1607488a2886a4493012dc0052
|
'''
An implementation of the Fully Adapted Auxiliary Particle Filter as described by Whiteley and Johansen
Chapter 3 Algorithm 2 on page 5 of Recent Developments in Auxiliary Particle Filtering:
http://www.maths.bris.ac.uk/~manpw/apf_chapter.pdf
The algorithm is tailored to perform inference in nonlinear dynamical systems and supports
the FitzHugh-Nagumo model (a relaxation oscillator closely related to the Van der Pol oscillator)
by Antonio Moretti - amoretti@cs.columbia.edu
'''
import math
import numpy as np
import scipy as sp
from scipy.stats import norm
from scipy import misc
def make_mvn_pdf(mu, sigma):
'''
creates a multivariate Gaussian pdf with mean mu and covariance sigma
'''
def f(x):
return sp.stats.multivariate_normal.pdf(x, mu, sigma)
return f
def make_poisson(k):
'''
creates a product of independent Poisson pmfs with rates exp(theta)
'''
def f(theta):
prob = 1
for i in range(len(k)):
prob *= sp.stats.poisson.pmf(k[i], np.exp(theta[i]))
return prob
return f
def fhn(Y, deltat, I):
'''
First-order Euler discretization of the FitzHugh-Nagumo equations
dv/dt = v - v^3/3 - w + I
dw/dt = 0.08*(v + 0.7 - 0.8*w)
where Y = [v, w] and deltat is the step size.
'''
y1 = Y[0]
y2 = Y[1]
return [y1 + (y1 - (y1**3)/3 - y2 + I)*deltat, y2 + (0.08*(y1 + 0.7 - 0.8*y2))*deltat]
def integrate_gaussian(grid, weight, mean, std, f):
'''
Performs one-dimensional Gauss-Hermite quadrature of f against a Gaussian with the given mean and std
'''
sq2=np.sqrt(2)
zz = sq2*std*grid + mean
value = np.dot(f(zz), weight)/np.sqrt(np.pi)
return value
def bivariate_gauss_hermite(xt, wt, mean, T, gfunc, XX, YY):
'''
Performs two dimensional Gauss-Hermite Quadrature with a change of measure to account for mu and sigma
'''
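# A minimal sketch of what this routine computes (assuming T is a matrix square
# root of the covariance, i.e. Sigma = T T^T):
#   E[g(X)] for X ~ N(mean, Sigma)
#     ~= (1/pi) * sum_i sum_j w_i w_j g(sqrt(2) * [x_i, x_j] T + mean)
# where (x_k, w_k) are the Gauss-Hermite nodes and weights passed in as xt, wt.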
import scipy as sp
from scipy import linalg
mat = np.array([XX.flatten(), YY.flatten()]).T
grid_trans = np.sqrt(2)*np.dot(mat , T) + mean
geval = np.asarray([gfunc(xx) for xx in grid_trans]).reshape([len(xt), len(xt)])
c = 1/(np.pi)
y_marginal = np.zeros(len(xt))
for idx in xrange(len(xt)):
y_marginal[idx] = np.dot(geval[idx,:], wt)
theta = np.dot(y_marginal, wt)*c
return theta
def apf(obs, time, n_particles, n_gridpoints, B, Sigma, Gamma, x_0, I_ext):
'''
Implements the Auxiliary Particle Filter as described by Whiteley and Johansen
Algorithm 2 on page 5: http://www.maths.bris.ac.uk/~manpw/apf_chapter.pdf
Input:
[obs] : a time x dimension matrix representing a time series of observed signals
[time] : a scalar representing the corresponding time length
[n_particles] : a scalar representing the number of particles to use in the simulation
[n_gridpoints] : a scalar representing the number of grid points or nodes to use for the quadrature
[B] : a 2x2 propagator matrix for the dynamics
[Sigma] : a 2x2 covariance matrix
[Gamma] : a 2x2 covariance matrix
[x_0] : a 2x1 vector of representing the initial value of the signal
[I_ext] : a scalar representing input current magnitude
Output:
[W] : an n_particles x time matrix of weights
[X] : an n_particles x time x dimension tensor of trajectories
[k] : an n_particles x time matrix of the posterior integral for each particle at each time point
Average the trajectory tensor (X) across particles to approximate the functional integral. Smooth the
resulting signal to remove noise.
'''
assert(len(obs) == time)
# Initialize variables
dimension = 2
n_gridpoints = n_gridpoints
X = np.zeros((n_particles, time, dimension))
W = np.zeros((n_particles, time))
k = np.zeros((n_particles, time))
proposal_covariance_matrix = 0.075*np.eye(dimension)
delta_t = 0.25
# Define gridpoints as roots of the hermite polynomials
[xt, wt] = np.polynomial.hermite.hermgauss(n_gridpoints)
# TO DO: generalize the computation of posterior integral
# Define our mesh for numerical integration
XX = np.tile(xt.reshape([1, len(xt)]), [len(xt), 1])
YY = np.tile(xt.reshape([len(xt), 1]), [1, len(xt)])
# Compute Cholesky decomposition of Sigma
T = sp.linalg.sqrtm(Sigma)
# Sample particles and weights at time 1
for i in range(0,n_particles):
X[i,0,:] = np.random.randn(1,dimension)[0]
g = make_mvn_pdf(x_0, Gamma)(obs[0,:])#(np.dot(B,X[i,0,:])).ravel()
nu = make_mvn_pdf(x_0, Sigma)(X[i,0,:])
q = make_mvn_pdf(np.zeros(dimension), proposal_covariance_matrix)(X[i,0,:])
# initialize weights
W[i,0] = g*nu/q
# main loop of program at time > 1
for t in range(1, time):
# Update weights and propagate particles based on postrior integral
for i in range(n_particles):
# Compute the posterior integral p(y_n | x_{n-1})
# Define the mean of the Gaussian
g_mean = np.dot(B,X[i,t,:])
g_int_func = make_mvn_pdf(g_mean,Gamma)
# Call our quadrature subroutine
k[i,t] = bivariate_gauss_hermite(xt, wt, fhn(X[i,t-1,:], delta_t, I_ext), T, g_int_func, XX, YY)
# Reweight particles
W[i,t-1] = W[i,t-1]*k[i,t]
# Resample
Xprime = np.random.choice(n_particles, n_particles, p = W[:,t-1]/np.sum(W[:,t-1]), replace = True)
Xtilde = [X[i,t-1,:] for i in Xprime]
# Reset weights and particles
for i in range(n_particles):
# Select new particles
X[i,t-1,:] = Xtilde[i]
# Resample particles and reset weights
X[i,t,:] = np.random.randn(1,dimension)[0] + X[i,t-1,:]
# Update proposal and target distributions
reshaped_g_mean = np.dot(B,X[i,t,:]).ravel()
g = make_mvn_pdf(reshaped_g_mean,Gamma)(obs[t,:])
q = make_mvn_pdf(X[i,t-1,:],proposal_covariance_matrix)(X[i,t,:])
f = make_mvn_pdf(fhn(X[i,t-1,:],delta_t,I_ext),Sigma)(X[i,t,:])
# Update weights
W[i,t] = (g*f)/(k[i,t]*q)
print "time: ", t
return W, X, k
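# Example usage (an illustrative sketch only -- the propagator, covariances,
# particle/grid counts and the synthetic observations below are made-up values,
# not recommendations):
#
#   T, N, G = 200, 500, 10
#   B = np.eye(2)
#   Sigma = 0.01 * np.eye(2)
#   Gamma = 0.1 * np.eye(2)
#   obs = np.random.randn(T, 2)          # substitute a real observed series here
#   W, X, k = apf(obs, T, N, G, B, Sigma, Gamma, x_0=np.zeros(2), I_ext=0.5)
#   x_hat = X.mean(axis=0)               # average over particles, then smooth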
|
amoretti86/auxiliary-particle-filter
|
apf_fhn.py
|
Python
|
mit
| 6,209
|
[
"Gaussian"
] |
9613590e4ccedc1cbe3ad7178fa98650834af7396725b04c4d62cc47fb5f053a
|
import numpy as np
class QIFNetwork:
"""
Network of quadratic integrate-and-fire neurons.
"""
def __init__(self, _neuronsPerLayer, _Dmax):
"""
Initialise network with given number of neurons
Inputs:
_neuronsPerLayer -- List with the number of neurons in each layer. A list
[N1, N2, ... Nk] will return a network with k layers
with the corresponding number of neurons in each.
_Dmax -- Maximum delay in all the synapses in the network. Any
longer delay will result in failing to deliver spikes.
"""
self.Dmax = _Dmax
self.Nlayers = len(_neuronsPerLayer)
self.layer = {}
for i, n in enumerate(_neuronsPerLayer):
self.layer[i] = QIFLayer(n)
def Update(self, t):
"""
Run simulation of the whole network for 1 millisecond and update the
network's internal variables.
Inputs:
t -- Current timestep. Necessary to sort out the synaptic delays.
"""
for lr in xrange(self.Nlayers):
self.NeuronUpdate(lr, t)
def NeuronUpdate(self, i, t):
"""
QIF neuron update function. Update one layer for 1 millisecond
using the Euler method.
Inputs:
i -- Number of layer to update
t -- Current timestep. Necessary to sort out the synaptic delays.
"""
# Euler method step size in ms
dt = 0.2
# Calculate current from incoming spikes
for j in xrange(self.Nlayers):
# If layer[i].S[j] exists then layer[i].factor[j] and
# layer[i].delay[j] have to exist
if j in self.layer[i].S:
S = self.layer[i].S[j] # target neuron->rows, source neuron->columns
# Firings contains time and neuron idx of each spike.
# [t, index of the neuron in the layer j]
firings = self.layer[j].firings
# Find incoming spikes taking delays into account
delay = self.layer[i].delay[j]
F = self.layer[i].factor[j]
# Sum current from incoming spikes
k = len(firings)
while k > 0 and (firings[k-1, 0] > (t - self.Dmax)):
idx = delay[:, firings[k-1, 1]] == (t-firings[k-1, 0])
self.layer[i].I[idx] += F * S[idx, firings[k-1, 1]]
k = k-1
# Update v using the QIF equations and Euler method
for k in xrange(int(1/dt)):
v = self.layer[i].v
self.layer[i].v += dt*(
self.layer[i].a*(self.layer[i].vr - v)*(self.layer[i].vc - v) +
self.layer[i].R*self.layer[i].I) / self.layer[i].tau
# Find index of neurons that have fired this millisecond
fired = np.where(self.layer[i].v >= 30)[0]
if len(fired) > 0:
for f in fired:
# Add spikes into spike train
if len(self.layer[i].firings) != 0:
self.layer[i].firings = np.vstack([self.layer[i].firings, [t, f]])
else:
self.layer[i].firings = np.array([[t, f]])
# Reset the membrane potential after spikes
# Here's a little hack to see if vr is array or scalar
if hasattr(self.layer[i].vr, "__len__"):
self.layer[i].v[f] = self.layer[i].vr[f]
else:
self.layer[i].v[f] = self.layer[i].vr
return
class QIFLayer:
"""
Layer of quadratic integrate-and-fire neurons to be used inside an
QIFNetwork.
"""
def __init__(self, n):
"""
Initialise layer with empty vectors.
Inputs:
n -- Number of neurons in the layer
"""
self.N = n
self.R = np.zeros(n)
self.tau = np.zeros(n)
self.vr = np.zeros(n)
self.vc = np.zeros(n)
self.a = np.zeros(n)
self.S = {}
self.delay = {}
self.factor = {}
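# Example usage (an illustrative sketch only; the layer sizes, neuron constants
# and the all-to-all connectivity below are made-up values, not part of the
# original exercise):
#
#   net = QIFNetwork([4, 4], _Dmax=10)
#   for lr in net.layer.values():              # fill in neuron parameters and state
#       lr.a[:] = 0.02; lr.R[:] = 1.0; lr.tau[:] = 10.0
#       lr.vr[:] = -65.0; lr.vc[:] = -50.0
#       lr.v = lr.vr.copy(); lr.I = np.zeros(lr.N); lr.firings = np.array([])
#   net.layer[1].S = {0: np.ones((4, 4))}      # layer 0 -> layer 1 synapses
#   net.layer[1].factor = {0: 20.0}
#   net.layer[1].delay = {0: np.ones((4, 4), dtype=int)}
#   for t in xrange(1, 1000):                  # one update per millisecond
#       net.layer[0].I = 5.0 * np.ones(4) + np.random.randn(4)
#       net.Update(t)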
|
pmediano/ComputationalNeurodynamics
|
Fall2015/Exercise_2/Solutions/QIFNetwork.py
|
Python
|
gpl-3.0
| 3,697
|
[
"NEURON"
] |
b75faa17f5106034f8f55d9cb4ae730ba49964229c0019514e123e68a5674d1f
|
import gtk
import goocanvas
import cairo
LEFT = 50.0
RIGHT = 350.0
MIDDLE = 150.0
DEFAULT_WIDTH = 2
DEFAULT_SHAPE_A = 4
DEFAULT_SHAPE_B = 5
DEFAULT_SHAPE_C = 4
def set_dimension (canvas, arrow_name, text_name, x1, y1, x2, y2, tx, ty, dim):
points = goocanvas.Points([(x1, y1), (x2, y2)])
item = canvas.get_data(arrow_name)
item.props.points = points
item = canvas.get_data(text_name)
item.props.text = str(dim)
item.props.x = tx
item.props.y = ty
def move_drag_box(item, x, y):
item.props.x = x - 5.0
item.props.y = y - 5.0
def set_arrow_shape(canvas):
width = canvas.get_data("width")
shape_a = canvas.get_data("shape_a")
shape_b = canvas.get_data("shape_b")
shape_c = canvas.get_data("shape_c")
item = canvas.get_data("big_arrow")
item.props.line_width = 10.0 * width
item.props.arrow_tip_length = shape_a
item.props.arrow_length = shape_b
item.props.arrow_width = shape_c
x1 = RIGHT - 10 * shape_a * width
y1 = MIDDLE - 10 * width / 2
x2 = RIGHT - 10 * shape_b * width
y2 = MIDDLE - 10 * (shape_c * width / 2.0)
x3 = RIGHT
y3 = MIDDLE
x4 = x2
y4 = MIDDLE + 10 * (shape_c * width / 2.0)
x5 = x1
y5 = MIDDLE + 10 * width / 2
points = goocanvas.Points([(x1, y1), (x2, y2), (x3, y3), (x4, y4), (x5, y5)])
item = canvas.get_data("outline")
item.props.points = points
move_drag_box(canvas.get_data("width_drag_box"), LEFT, MIDDLE - 10 * width / 2.0)
move_drag_box (canvas.get_data("shape_a_drag_box"), RIGHT - 10 * shape_a * width, MIDDLE)
move_drag_box (canvas.get_data("shape_b_c_drag_box"), RIGHT - 10 * shape_b * width,
MIDDLE - 10 * (shape_c * width / 2.0))
set_dimension(canvas, "width_arrow", "width_text",
LEFT - 10,
MIDDLE - 10 * width / 2.0,
LEFT - 10,
MIDDLE + 10 * width / 2.0,
LEFT - 15,
MIDDLE,
width)
set_dimension (canvas, "shape_a_arrow", "shape_a_text",
RIGHT - 10 * shape_a * width,
MIDDLE + 10 * (shape_c * width / 2.0) + 10,
RIGHT,
MIDDLE + 10 * (shape_c * width / 2.0) + 10,
RIGHT - 10 * shape_a * width / 2.0,
MIDDLE + 10 * (shape_c * width / 2.0) + 15,
shape_a)
set_dimension (canvas, "shape_b_arrow", "shape_b_text",
RIGHT - 10 * shape_b * width,
MIDDLE + 10 * (shape_c * width / 2.0) + 35,
RIGHT,
MIDDLE + 10 * (shape_c * width / 2.0) + 35,
RIGHT - 10 * shape_b * width / 2.0,
MIDDLE + 10 * (shape_c * width / 2.0) + 40,
shape_b)
set_dimension (canvas, "shape_c_arrow", "shape_c_text",
RIGHT + 10,
MIDDLE - 10 * shape_c * width / 2.0,
RIGHT + 10,
MIDDLE + 10 * shape_c * width / 2.0,
RIGHT + 15,
MIDDLE,
shape_c)
item = canvas.get_data("width_info")
item.props.text = "line-width: " + str(width)
item = canvas.get_data("shape_a_info")
item.props.text = "arrow-tip-length: " + str(shape_a) + " (* line-width)"
item = canvas.get_data("shape_b_info")
item.props.text = "arrow-length: " + str(shape_b) + " (* line-width)"
item = canvas.get_data("shape_c_info")
item.props.text = "arrow-width: " + str(shape_c) + " (* line-width)"
item = canvas.get_data("sample_1")
item.props.line_width = width
item.props.arrow_tip_length = shape_a
item.props.arrow_length = shape_b
item.props.arrow_width = shape_c
item = canvas.get_data("sample_2")
item.props.line_width = width
item.props.arrow_tip_length = shape_a
item.props.arrow_length = shape_b
item.props.arrow_width = shape_c
item = canvas.get_data("sample_3")
item.props.line_width = width
item.props.arrow_tip_length = shape_a
item.props.arrow_length = shape_b
item.props.arrow_width = shape_c
def create_drag_box(canvas, root, box_name):
item = goocanvas.Rect(parent = root,
x=0, y=0, width=10, height=10,
fill_color="black",
stroke_color="black",
line_width=1.0)
canvas.set_data(box_name, item)
item.connect("enter_notify_event", on_enter_notify)
item.connect("leave_notify_event", on_leave_notify)
item.connect("button_press_event", on_button_press)
item.connect("button_release_event", on_button_release)
item.connect("motion_notify_event", on_motion)
def create_dimension(canvas, root, arrow_name, text_name, anchor):
p3 = goocanvas.Polyline(parent = root,
fill_color="black",
start_arrow=True,
end_arrow=True)
canvas.set_data(arrow_name, p3)
text = goocanvas.Text(parent = root,
x=0, y=0, width=-1, anchor=anchor,
fill_color="black",
font="Sans 12")
canvas.set_data(text_name, text)
def create_info(canvas, root, info_name, x, y):
t = goocanvas.Text(parent = root,
x=x, y=y, width=-1,
anchor=gtk.ANCHOR_NW,
fill_color="black",
font="Sans 14")
canvas.set_data(info_name, t)
def create_sample_arrow(canvas, root, sample_name, x1, y1, x2, y2):
p4 = goocanvas.polyline_new_line(root, x1, y1, x2, y2,
start_arrow=True,
end_arrow=True)
canvas.set_data(sample_name, p4)
def on_enter_notify(item, target, event):
item.props.fill_color = "red"
return True
def on_leave_notify(item, target, event):
item.props.fill_color = "black"
return True
def on_button_press(item, target, event):
fleur = gtk.gdk.Cursor(gtk.gdk.FLEUR)
canvas = item.get_canvas ()
canvas.pointer_grab(item,
gtk.gdk.POINTER_MOTION_MASK | gtk.gdk.BUTTON_RELEASE_MASK,
fleur, event.time)
return True
def on_button_release(item, target, event):
canvas = item.get_canvas ()
canvas.pointer_ungrab(item, event.time)
return True
def on_motion(item, target, event):
canvas = item.get_canvas ()
change = False
if not event.state == gtk.gdk.BUTTON1_MASK:
return False
if item == canvas.get_data("width_drag_box"):
y = event.y
width = (MIDDLE - y) / 5
if width < 0:
return False
canvas.set_data("width", width)
set_arrow_shape (canvas)
elif item == canvas.get_data("shape_a_drag_box"):
x = event.x
width = canvas.get_data("width")
shape_a = (RIGHT - x) / 10 / width
if shape_a < 0 or shape_a > 30:
return False
canvas.set_data("shape_a", shape_a)
set_arrow_shape (canvas)
elif item == canvas.get_data("shape_b_c_drag_box"):
x = event.x
width = canvas.get_data("width")
shape_b = (RIGHT - x) / 10 / width
if shape_b >= 0 and shape_b <= 30:
canvas.set_data("shape_b", shape_b)
change = True
y = event.y
shape_c = (MIDDLE - y) * 2 / 10 / width
if shape_c >= 0:
canvas.set_data("shape_c", shape_c)
change = True
if change:
set_arrow_shape (canvas)
return True
def create_canvas_arrowhead ():
v = gtk.VBox(False, 4)
v.set_border_width(4)
l = gtk.Label("This demo allows you to edit arrowhead shapes. Drag the little boxes\n"
"to change the shape of the line and its arrowhead. You can see the\n"
"arrows at their normal scale on the right hand side of the window.")
a = gtk.Alignment(0.5, 0.5, 0.0, 0.0)
v.pack_start(l, False, False, 0)
v.pack_start(a, True, True, 0)
f = gtk.Frame()
f.set_shadow_type(gtk.SHADOW_IN)
a.add(f)
canvas = goocanvas.Canvas()
canvas.set_size_request(500, 350)
canvas.set_bounds(0, 0, 500, 350)
f.add(canvas)
root = canvas.get_root_item()
canvas.set_data("width", DEFAULT_WIDTH)
canvas.set_data("shape_a", DEFAULT_SHAPE_A)
canvas.set_data("shape_b", DEFAULT_SHAPE_B)
canvas.set_data("shape_c", DEFAULT_SHAPE_C)
p1 = goocanvas.polyline_new_line(root, LEFT, MIDDLE, RIGHT, MIDDLE,
stroke_color="mediumseagreen",
end_arrow=True)
canvas.set_data("big_arrow", p1)
p2 = goocanvas.Polyline(parent = root,
close_path=True,
stroke_color="black",
line_width=2.0,
line_cap=cairo.LINE_CAP_ROUND,
line_join=cairo.LINE_JOIN_ROUND)
canvas.set_data("outline", p2)
create_drag_box (canvas, root, "width_drag_box")
create_drag_box (canvas, root, "shape_a_drag_box")
create_drag_box (canvas, root, "shape_b_c_drag_box")
create_dimension (canvas, root, "width_arrow", "width_text", gtk.ANCHOR_E)
create_dimension (canvas, root, "shape_a_arrow", "shape_a_text", gtk.ANCHOR_N)
create_dimension (canvas, root, "shape_b_arrow", "shape_b_text", gtk.ANCHOR_N)
create_dimension (canvas, root, "shape_c_arrow", "shape_c_text", gtk.ANCHOR_W)
create_info (canvas, root, "width_info", LEFT, 260)
create_info (canvas, root, "shape_a_info", LEFT, 280)
create_info (canvas, root, "shape_b_info", LEFT, 300)
create_info (canvas, root, "shape_c_info", LEFT, 320)
p_div = goocanvas.polyline_new_line(root, RIGHT + 50, 0, RIGHT + 50, 1000,
fill_color="black", line_width=2.0)
create_sample_arrow (canvas, root, "sample_1",
RIGHT + 100, 30, RIGHT + 100, MIDDLE - 30)
create_sample_arrow (canvas, root, "sample_2",
RIGHT + 70, MIDDLE, RIGHT + 130, MIDDLE)
create_sample_arrow (canvas, root, "sample_3",
RIGHT + 70, MIDDLE + 30, RIGHT + 130, MIDDLE + 120)
set_arrow_shape(canvas)
return v
def main ():
v = create_canvas_arrowhead ()
w = gtk.Window()
w.connect("destroy", gtk.main_quit)
w.add(v)
w.show_all()
gtk.main()
if __name__ == "__main__":
main()
|
GNOME/pygoocanvas
|
demo/simple_demo/arrowhead_demo.py
|
Python
|
lgpl-2.1
| 10,540
|
[
"FLEUR"
] |
e562335206283defbb48659cbf11fa06248d15f99a28060f4e3bb2300b1b3533
|
"""
# ==============================================================================
Plots data vs model response computed by an Inversion Method
# ==============================================================================
"""
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import FormatStrFormatter
import mtpy.imaging.mtplottools as mtplottools
from mtpy.modeling.modem import Data
import click
try:
from pyevtk.hl import gridToVTK, pointsToVTK
except ImportError:
print ('If you want to write a vtk file for 3d viewing, you need to pip install PyEVTK:'
' https://bitbucket.org/pauloh/pyevtk')
print ('Note: if you are using Windows you should build evtk first with'
'either MinGW or cygwin using the command: \n'
' python setup.py build -compiler=mingw32 or \n'
' python setup.py build -compiler=cygwin')
class PlotResponse(object):
"""
plot data and response
Plots the real and imaginary impedance and induction vector if present.
:Example: ::
>>> import mtpy.modeling.new_modem as modem
>>> dfn = r"/home/MT/ModEM/Inv1/DataFile.dat"
>>> rfn = r"/home/MT/ModEM/Inv1/Test_resp_000.dat"
>>> mrp = modem.PlotResponse(data_fn=dfn, resp_fn=rfn)
>>> # plot only the TE and TM modes
>>> mrp.plot_component = 2
>>> mrp.redraw_plot()
======================== ==================================================
Attributes Description
======================== ==================================================
color_mode [ 'color' | 'bw' ] color or black and white plots
cted color for data TE mode
ctem color for model TE mode
ctmd color for data TM mode
ctmm color for model TM mode
data_fn full path to data file
data_object WSResponse instance
e_capsize cap size of error bars in points (*default* is 2)
e_capthick cap thickness of error bars in points (*default*
is .5)
fig_dpi resolution of figure in dots-per-inch (300)
fig_list list of matplotlib.figure instances for plots
fig_size size of figure in inches (*default* is [6, 6])
font_size size of font for tick labels, axes labels are
font_size+2 (*default* is 6)
legend_border_axes_pad padding between legend box and axes
legend_border_pad padding between border of legend and symbols
legend_handle_text_pad padding between text labels and symbols of legend
legend_label_spacing padding between labels
legend_loc location of legend
legend_marker_scale scale of symbols in legend
lw line width response curves (*default* is .5)
ms size of markers (*default* is 1.5)
mted marker for data TE mode
mtem marker for model TE mode
mtmd marker for data TM mode
mtmm marker for model TM mode
phase_limits limits of phase
plot_component [ 2 | 4 ] 2 for TE and TM or 4 for all components
plot_style [ 1 | 2 ] 1 to plot each mode in a separate
subplot and 2 to plot xx, xy and yx, yy in same
plots
plot_type [ '1' | list of station name ] '1' to plot all
stations in data file or input a list of station
names to plot if station_fn is input, otherwise
input a list of integers associated with the
index with in the data file, ie 2 for 2nd station
plot_z [ True | False ] *default* is True to plot
impedance, False for plotting resistivity and
phase
plot_yn [ 'n' | 'y' ] to plot on instantiation
res_limits limits of resistivity in linear scale
resp_fn full path to response file
resp_object WSResponse object for resp_fn, or list of
WSResponse objects if resp_fn is a list of
response files
station_fn full path to station file written by WSStation
subplot_bottom space between axes and bottom of figure
subplot_hspace space between subplots in vertical direction
subplot_left space between axes and left of figure
subplot_right space between axes and right of figure
subplot_top space between axes and top of figure
subplot_wspace space between subplots in horizontal direction
======================== ==================================================
"""
def __init__(self, data_fn=None, resp_fn=None, **kwargs):
self.data_fn = data_fn
self.resp_fn = resp_fn
self.data_object = None
self.resp_object = []
self.color_mode = kwargs.pop('color_mode', 'color')
self.ms = kwargs.pop('ms', 1.5)
self.lw = kwargs.pop('lw', .5)
self.ls = kwargs.pop('ls',':')
self.e_capthick = kwargs.pop('e_capthick', .5)
self.e_capsize = kwargs.pop('e_capsize', 2)
# color mode
if self.color_mode == 'color':
# color for data
self.cted = kwargs.pop('cted', (0, 0, 1))
self.ctmd = kwargs.pop('ctmd', (1, 0, 0))
self.mted = kwargs.pop('mted', 's')
self.mtmd = kwargs.pop('mtmd', 'o')
# color for occam2d model
self.ctem = kwargs.pop('ctem', (0, .6, .3))
self.ctmm = kwargs.pop('ctmm', (.9, 0, .8))
self.mtem = kwargs.pop('mtem', '+')
self.mtmm = kwargs.pop('mtmm', '+')
# black and white mode
elif self.color_mode == 'bw':
# color for data
self.cted = kwargs.pop('cted', (0, 0, 0))
self.ctmd = kwargs.pop('ctmd', (0, 0, 0))
self.mted = kwargs.pop('mted', 's')
self.mtmd = kwargs.pop('mtmd', 'o')
# color for occam2d model
self.ctem = kwargs.pop('ctem', (0.6, 0.6, 0.6))
self.ctmm = kwargs.pop('ctmm', (0.6, 0.6, 0.6))
self.mtem = kwargs.pop('mtem', '+')
self.mtmm = kwargs.pop('mtmm', 'x')
self.phase_limits = kwargs.pop('phase_limits', None)
self.res_limits = kwargs.pop('res_limits', None)
self.fig_num = kwargs.pop('fig_num', 1)
self.fig_size = kwargs.pop('fig_size', [6, 6])
self.fig_dpi = kwargs.pop('dpi', 300)
self.subplot_wspace = kwargs.pop('subplot_wspace', .3)
self.subplot_hspace = kwargs.pop('subplot_hspace', .0)
self.subplot_right = kwargs.pop('subplot_right', .98)
self.subplot_left = kwargs.pop('subplot_left', .08)
self.subplot_top = kwargs.pop('subplot_top', .85)
self.subplot_bottom = kwargs.pop('subplot_bottom', .1)
self.legend_loc = 'upper right'
# self.legend_pos = (.5, 1.21)
self.legend_pos = (.3, 1.18)
self.legend_marker_scale = 1
self.legend_border_axes_pad = .01
self.legend_label_spacing = 0.07
self.legend_handle_text_pad = .2
self.legend_border_pad = .15
self.font_size = kwargs.pop('font_size', 6)
self.plot_type = kwargs.pop('plot_type', '1')
self.plot_style = kwargs.pop('plot_style', 1)
self.plot_component = kwargs.pop('plot_component', 4)
self.plot_yn = kwargs.pop('plot_yn', 'y')
self.plot_z = kwargs.pop('plot_z', True)
self.ylabel_pad = kwargs.pop('ylabel_pad', 1.25)
self.fig_list = []
self.ax_list = []
# __init__ only constructs the object; call the plot() method explicitly to draw the figure.
# if self.plot_yn == 'y':
# self.plot()
return
def plot(self, save2file=None):
"""
Plot data and model responses; optionally save the figure to the file named by save2file.
"""
self.data_object = Data()
self.data_object.read_data_file(self.data_fn)
# number of stations in the data file
ns = len(self.data_object.mt_dict.keys())
# read in response files
if self.resp_fn is not None:
self.resp_object = []
if not isinstance(self.resp_fn, list):
resp_obj = Data()
resp_obj.read_data_file(self.resp_fn)
self.resp_object = [resp_obj]
else:
for rfile in self.resp_fn:
resp_obj = Data()
resp_obj.read_data_file(rfile)
self.resp_object.append(resp_obj)
# get number of response files
nr = len(self.resp_object)
if isinstance(self.plot_type, list):
ns = len(self.plot_type)
# --> set default font size
plt.rcParams['font.size'] = self.font_size
fontdict = {'size': self.font_size + 2, 'weight': 'bold'}
if self.plot_z == True:
h_ratio = [1, 1]
elif self.plot_z == False:
# h_ratio = [2, 1.5]
h_ratio = [2.0, 1.5, 0.75]
self.ax_list = []
line_list = []
label_list = []
# --> make key word dictionaries for plotting
kw_xx = {'color': self.cted,
'marker': self.mted,
'ms': self.ms,
'ls': self.ls,
'lw': self.lw,
'e_capsize': self.e_capsize,
'e_capthick': self.e_capthick}
kw_yy = {'color': self.ctmd,
'marker': self.mtmd,
'ms': self.ms,
'ls': self.ls,
'lw': self.lw,
'e_capsize': self.e_capsize,
'e_capthick': self.e_capthick}
if self.plot_type != '1':
pstation_list = []
if not isinstance(self.plot_type, list):
self.plot_type = [self.plot_type]
for ii, station in enumerate(self.data_object.mt_dict.keys()):
if not isinstance(station, int):
for pstation in self.plot_type:
if station.find(str(pstation)) >= 0:
pstation_list.append(station)
else:
for pstation in self.plot_type:
if station == int(pstation):
pstation_list.append(ii)
else:
pstation_list = self.data_object.mt_dict.keys()
for jj, station in enumerate(pstation_list):
z_obj = self.data_object.mt_dict[station].Z
t_obj = self.data_object.mt_dict[station].Tipper
period = self.data_object.period_list
# convert to apparent resistivity and phase
rp = mtplottools.ResPhase(z_object=z_obj)
# find locations where points have been masked
nzxx = np.nonzero(z_obj.z[:, 0, 0])[0]
nzxy = np.nonzero(z_obj.z[:, 0, 1])[0]
nzyx = np.nonzero(z_obj.z[:, 1, 0])[0]
nzyy = np.nonzero(z_obj.z[:, 1, 1])[0]
ntx = np.nonzero(t_obj.tipper[:, 0, 0])[0]
nty = np.nonzero(t_obj.tipper[:, 0, 1])[0]
if self.resp_fn is not None:
plotr = True
else:
plotr = False
# make figure
fig = plt.figure(station, self.fig_size, dpi=self.fig_dpi)
self.fig_list.append(fig)
plt.clf()
fig.suptitle(str(station), fontdict=fontdict)
# set the grid of subplots
tipper_zero = (np.round(abs(t_obj.tipper.mean()), 4) == 0.0)
if tipper_zero == False:
# plot the tipper panels only when the tipper contains non-zero data
plot_tipper = True
else:
plot_tipper = False
if plot_tipper == True:
# gs = gridspec.GridSpec(2, 6,
# wspace=self.subplot_wspace,
# left=self.subplot_left,
# top=self.subplot_top,
# bottom=self.subplot_bottom,
# right=self.subplot_right,
# hspace=self.subplot_hspace,
# height_ratios=h_ratio)
# Changed for testing
if len(h_ratio) < 3 :
h_ratio = [2.0, 1.5, 0.75]
gs = gridspec.GridSpec(3, 2,
wspace=self.subplot_wspace,
left=self.subplot_left,
top=self.subplot_top,
bottom=self.subplot_bottom,
right=self.subplot_right,
hspace=self.subplot_hspace,
height_ratios=h_ratio)
else:
gs = gridspec.GridSpec(2, 4,
wspace=self.subplot_wspace,
left=self.subplot_left,
top=self.subplot_top,
bottom=self.subplot_bottom,
right=self.subplot_right,
hspace=self.subplot_hspace,
height_ratios=h_ratio)
# ---------plot the apparent resistivity---------------------------
# plot each component in its own subplot
if self.plot_style == 1:
# plot xy and yx
if self.plot_component == 2:
if plot_tipper == False:
axrxy = fig.add_subplot(gs[0, 0:2])
axryx = fig.add_subplot(gs[0, 2:], sharex=axrxy)
axpxy = fig.add_subplot(gs[1, 0:2], sharex=axrxy)
axpyx = fig.add_subplot(gs[1, 2:], sharex=axrxy)
else:
axrxy = fig.add_subplot(gs[0, 0:2])
axryx = fig.add_subplot(gs[0, 2:4], sharex=axrxy)
axpxy = fig.add_subplot(gs[1, 0:2], sharex=axrxy)
axpyx = fig.add_subplot(gs[1, 2:4], sharex=axrxy)
axtr = fig.add_subplot(gs[0, 4:], sharex=axrxy)
axti = fig.add_subplot(gs[1, 4:], sharex=axrxy)
axtr.set_ylim(-1.2, 1.2)
axti.set_ylim(-1.2, 1.2)
if self.plot_z == False:
# plot resistivity
erxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
rp.resxy[nzxy],
rp.resxy_err[nzxy],
**kw_xx)
eryx = mtplottools.plot_errorbar(axryx,
period[nzyx],
rp.resyx[nzyx],
rp.resyx_err[nzyx],
**kw_yy)
# plot phase
erxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
rp.phasexy[nzxy],
rp.phasexy_err[nzxy],
**kw_xx)
eryx = mtplottools.plot_errorbar(axpyx,
period[nzyx],
rp.phaseyx[nzyx],
rp.phaseyx_err[nzyx],
**kw_yy)
elif self.plot_z == True:
# plot real
erxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
abs(z_obj.z[
nzxy, 0, 1].real),
abs(z_obj.z_err[
nzxy, 0, 1].real),
**kw_xx)
eryx = mtplottools.plot_errorbar(axryx,
period[nzyx],
abs(z_obj.z[
nzyx, 1, 0].real),
abs(z_obj.z_err[
nzyx, 1, 0].real),
**kw_yy)
# plot phase
erxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
abs(z_obj.z[
nzxy, 0, 1].imag),
abs(z_obj.z_err[
nzxy, 0, 1].real),
**kw_xx)
eryx = mtplottools.plot_errorbar(axpyx,
period[nzyx],
abs(z_obj.z[
nzyx, 1, 0].imag),
abs(z_obj.z_err[
nzyx, 1, 0].real),
**kw_yy)
# plot tipper
if plot_tipper == True:
ertx = mtplottools.plot_errorbar(axtr,
period[ntx],
t_obj.tipper[
ntx, 0, 0].real,
t_obj.tipper_err[
ntx, 0, 0],
**kw_xx)
erty = mtplottools.plot_errorbar(axtr,
period[nty],
t_obj.tipper[
nty, 0, 1].real,
t_obj.tipper_err[
nty, 0, 1],
**kw_yy)
ertx = mtplottools.plot_errorbar(axti,
period[ntx],
t_obj.tipper[
ntx, 0, 0].imag,
t_obj.tipper_err[
ntx, 0, 0],
**kw_xx)
erty = mtplottools.plot_errorbar(axti,
period[nty],
t_obj.tipper[
nty, 0, 1].imag,
t_obj.tipper_err[
nty, 0, 1],
**kw_yy)
if plot_tipper == False:
self.ax_list = [axrxy, axryx, axpxy, axpyx]
line_list = [[erxy[0]], [eryx[0]]]
label_list = [['$Z_{xy}$'], ['$Z_{yx}$']]
else:
self.ax_list = [axrxy, axryx, axpxy, axpyx, axtr, axti]
line_list = [[erxy[0]], [eryx[0]],
[ertx[0], erty[0]]]
label_list = [['$Z_{xy}$'], ['$Z_{yx}$'],
['$T_{x}$', '$T_{y}$']]
elif self.plot_component == 4:
if plot_tipper == False:
axrxx = fig.add_subplot(gs[0, 0])
axrxy = fig.add_subplot(gs[0, 1], sharex=axrxx)
axryx = fig.add_subplot(gs[0, 2], sharex=axrxx)
axryy = fig.add_subplot(gs[0, 3], sharex=axrxx)
axpxx = fig.add_subplot(gs[1, 0])
axpxy = fig.add_subplot(gs[1, 1], sharex=axrxx)
axpyx = fig.add_subplot(gs[1, 2], sharex=axrxx)
axpyy = fig.add_subplot(gs[1, 3], sharex=axrxx)
else:
axrxx = fig.add_subplot(gs[0, 0])
axrxy = fig.add_subplot(gs[0, 1], sharex=axrxx)
axryx = fig.add_subplot(gs[0, 2], sharex=axrxx)
axryy = fig.add_subplot(gs[0, 3], sharex=axrxx)
axpxx = fig.add_subplot(gs[1, 0])
axpxy = fig.add_subplot(gs[1, 1], sharex=axrxx)
axpyx = fig.add_subplot(gs[1, 2], sharex=axrxx)
axpyy = fig.add_subplot(gs[1, 3], sharex=axrxx)
axtxr = fig.add_subplot(gs[0, 4], sharex=axrxx)
axtxi = fig.add_subplot(gs[1, 4], sharex=axrxx)
axtyr = fig.add_subplot(gs[0, 5], sharex=axrxx)
axtyi = fig.add_subplot(gs[1, 5], sharex=axrxx)
axtxr.set_ylim(-1.2, 1.2)
axtxi.set_ylim(-1.2, 1.2)
axtyr.set_ylim(-1.2, 1.2)
axtyi.set_ylim(-1.2, 1.2)
if self.plot_z == False:
# plot resistivity
erxx = mtplottools.plot_errorbar(axrxx,
period[nzxx],
rp.resxx[nzxx],
rp.resxx_err[nzxx],
**kw_xx)
erxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
rp.resxy[nzxy],
rp.resxy_err[nzxy],
**kw_xx)
eryx = mtplottools.plot_errorbar(axryx,
period[nzyx],
rp.resyx[nzyx],
rp.resyx_err[nzyx],
**kw_yy)
eryy = mtplottools.plot_errorbar(axryy,
period[nzyy],
rp.resyy[nzyy],
rp.resyy_err[nzyy],
**kw_yy)
# plot phase
erxx = mtplottools.plot_errorbar(axpxx,
period[nzxx],
rp.phasexx[nzxx],
rp.phasexx_err[nzxx],
**kw_xx)
erxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
rp.phasexy[nzxy],
rp.phasexy_err[nzxy],
**kw_xx)
eryx = mtplottools.plot_errorbar(axpyx,
period[nzyx],
rp.phaseyx[nzyx],
rp.phaseyx_err[nzyx],
**kw_yy)
eryy = mtplottools.plot_errorbar(axpyy,
period[nzyy],
rp.phaseyy[nzyy],
rp.phaseyy_err[nzyy],
**kw_yy)
elif self.plot_z == True:
# plot real
erxx = mtplottools.plot_errorbar(axrxx,
period[nzxx],
abs(z_obj.z[
nzxx, 0, 0].real),
abs(z_obj.z_err[
nzxx, 0, 0].real),
**kw_xx)
erxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
abs(z_obj.z[
nzxy, 0, 1].real),
abs(z_obj.z_err[
nzxy, 0, 1].real),
**kw_xx)
eryx = mtplottools.plot_errorbar(axryx,
period[nzyx],
abs(z_obj.z[
nzyx, 1, 0].real),
abs(z_obj.z_err[
nzyx, 1, 0].real),
**kw_yy)
eryy = mtplottools.plot_errorbar(axryy,
period[nzyy],
abs(z_obj.z[
nzyy, 1, 1].real),
abs(z_obj.z_err[
nzyy, 1, 1].real),
**kw_yy)
# plot phase
erxx = mtplottools.plot_errorbar(axpxx,
period[nzxx],
abs(z_obj.z[
nzxx, 0, 0].imag),
abs(z_obj.z_err[
nzxx, 0, 0].real),
**kw_xx)
erxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
abs(z_obj.z[
nzxy, 0, 1].imag),
abs(z_obj.z_err[
nzxy, 0, 1].real),
**kw_xx)
eryx = mtplottools.plot_errorbar(axpyx,
period[nzyx],
abs(z_obj.z[
nzyx, 1, 0].imag),
abs(z_obj.z_err[
nzyx, 1, 0].real),
**kw_yy)
eryy = mtplottools.plot_errorbar(axpyy,
period[nzyy],
abs(z_obj.z[
nzyy, 1, 1].imag),
abs(z_obj.z_err[
nzyy, 1, 1].real),
**kw_yy)
# plot tipper
if plot_tipper == True:
ertx = mtplottools.plot_errorbar(axtxr,
period[ntx],
t_obj.tipper[
ntx, 0, 0].real,
t_obj.tipper_err[
ntx, 0, 0],
**kw_xx)
erty = mtplottools.plot_errorbar(axtyr,
period[nty],
t_obj.tipper[
nty, 0, 1].real,
t_obj.tipper_err[
nty, 0, 0],
**kw_yy)
ertx = mtplottools.plot_errorbar(axtxi,
period[ntx],
t_obj.tipper[
ntx, 0, 0].imag,
t_obj.tipper_err[
ntx, 0, 1],
**kw_xx)
erty = mtplottools.plot_errorbar(axtyi,
period[nty],
t_obj.tipper[
nty, 0, 1].imag,
t_obj.tipper_err[
nty, 0, 1],
**kw_yy)
if plot_tipper == False:
self.ax_list = [axrxx, axrxy, axryx, axryy,
axpxx, axpxy, axpyx, axpyy]
line_list = [[erxx[0]], [erxy[0]],
[eryx[0]], [eryy[0]]]
label_list = [['$Z_{xx}$'], ['$Z_{xy}$'],
['$Z_{yx}$'], ['$Z_{yy}$']]
else:
self.ax_list = [axrxx, axrxy, axryx, axryy,
axpxx, axpxy, axpyx, axpyy,
axtxr, axtxi, axtyr, axtyi]
line_list = [[erxx[0]], [erxy[0]],
[eryx[0]], [eryy[0]],
[ertx[0]], [erty[0]]]
label_list = [['$Z_{xx}$'], ['$Z_{xy}$'],
['$Z_{yx}$'], ['$Z_{yy}$'],
['$T_{x}$'], ['$T_{y}$']]
# set axis properties
for aa, ax in enumerate(self.ax_list):
ax.tick_params(axis='y', pad=self.ylabel_pad)
# ylabels = ax.get_yticks().tolist()
# ylabels[-1] = ''
# ylabels[0] = ''
# ax.set_yticklabels(ylabels)
# print ylabels
#
# dy = abs(ax.yaxis.get_ticklocs()[1]-
# ax.yaxis.get_ticklocs()[0])
# ylim = ax.get_ylim()
# ax.set_ylim(ylim[0]-.25*dy, ylim[1]+1.25*dy)
# ax.yaxis.set_major_locator(MultipleLocator(dy))
if len(self.ax_list) == 4:
#ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
if self.plot_z == True:
ax.set_yscale('log', nonposy='clip')
ylim = ax.get_ylim()
ylimits = (10 ** np.floor(np.log10(ylim[0])),
10 ** np.ceil(np.log10(ylim[1])))
ax.set_ylim(ylimits)
ylabels = [' '] + \
[mtplottools.labeldict[ii] for ii
in np.arange(np.log10(ylimits[0]),
np.log10(ylimits[1]), 1)] + \
[' ']
ax.set_yticklabels(ylabels)
if len(self.ax_list) == 6:
if aa < 4:
# ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
if self.plot_z == True:
ax.set_yscale('log', nonposy='clip')
ylim = ax.get_ylim()
ylimits = (10 ** np.floor(np.log10(ylim[0])),
10 ** np.ceil(np.log10(ylim[1])))
ax.set_ylim(ylimits)
ylabels = [' '] + \
[mtplottools.labeldict[ii] for ii
in np.arange(np.log10(ylimits[0]),
np.log10(ylimits[1]), 1)] + \
[' ']
ax.set_yticklabels(ylabels)
if len(self.ax_list) == 8:
# ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
if self.plot_z == True:
ax.set_yscale('log', nonposy='clip')
ylim = ax.get_ylim()
ylimits = (10 ** np.floor(np.log10(ylim[0])),
10 ** np.ceil(np.log10(ylim[1])))
ax.set_ylim(ylimits)
ylabels = [' '] + \
[mtplottools.labeldict[ii] for ii
in np.arange(np.log10(ylimits[0]),
np.log10(ylimits[1]), 1)] + \
[' ']
ax.set_yticklabels(ylabels)
if len(self.ax_list) == 12:
if aa < 4:
ylabels = ax.get_yticks().tolist()
ylabels[0] = ''
ax.set_yticklabels(ylabels)
if aa < 8:
# ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
if self.plot_z == True:
ax.set_yscale('log', nonposy='clip')
ylim = ax.get_ylim()
ylimits = (10 ** np.floor(np.log10(ylim[0])),
10 ** np.ceil(np.log10(ylim[1])))
ax.set_ylim(ylimits)
ylabels = [' '] + \
[mtplottools.labeldict[ii] for ii
in np.arange(np.log10(ylimits[0]),
np.log10(ylimits[1]), 1)] + \
[' ']
ax.set_yticklabels(ylabels)
if len(self.ax_list) == 4 or len(self.ax_list) == 6:
if aa < 2:
plt.setp(ax.get_xticklabels(), visible=False)
if self.plot_z == False:
ax.set_yscale('log', nonposy='clip')
if self.res_limits is not None:
ax.set_ylim(self.res_limits)
else:
ax.set_ylim(self.phase_limits)
ax.set_xlabel('Period (s)', fontdict=fontdict)
# set axes labels
if aa == 0:
if self.plot_z == False:
ax.set_ylabel('App. Res. ($\mathbf{\Omega \cdot m}$)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('|Re[Z]| (mV/km nT)',
fontdict=fontdict)
elif aa == 2:
if self.plot_z == False:
ax.set_ylabel('Phase (deg)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('|Im[Z]| (mV/km nT)',
fontdict=fontdict)
elif len(self.ax_list) == 8 or len(self.ax_list) == 12:
if aa < 4:
plt.setp(ax.get_xticklabels(), visible=False)
if self.plot_z == False:
ax.set_yscale('log', nonposy='clip')
ylim = ax.get_ylim()
ylimits = (10 ** np.floor(np.log10(ylim[0])),
10 ** np.ceil(np.log10(ylim[1])))
ax.set_ylim(ylimits)
ylabels = [' ', ' '] + \
[mtplottools.labeldict[ii] for ii
in np.arange(np.log10(ylimits[0]) + 1,
np.log10(ylimits[1]) + 1, 1)]
ax.set_yticklabels(ylabels)
if self.res_limits is not None:
ax.set_ylim(self.res_limits)
else:
if aa == 8 or aa == 10:
plt.setp(ax.get_xticklabels(), visible=False)
else:
ax.set_ylim(self.phase_limits)
ax.set_xlabel('Period (s)', fontdict=fontdict)
# set axes labels
if aa == 0:
if self.plot_z == False:
ax.set_ylabel('App. Res. ($\mathbf{\Omega \cdot m}$)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('|Re[Z]| (mV/km nT)',
fontdict=fontdict)
elif aa == 4:
if self.plot_z == False:
ax.set_ylabel('Phase (deg)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('|Im[Z]| (mV/km nT)',
fontdict=fontdict)
ax.set_xscale('log', nonposx='clip')
ax.set_xlim(xmin=10 ** (np.floor(np.log10(period[0]))) * 1.01,
xmax=10 ** (np.ceil(np.log10(period[-1]))) * .99)
ax.grid(True, alpha=.25)
# plot xy and yx together and xx, yy together
elif self.plot_style == 2:
if self.plot_component == 2:
if plot_tipper == False:
axrxy = fig.add_subplot(gs[0, 0:])
axpxy = fig.add_subplot(gs[1, 0:], sharex=axrxy)
else:
axrxy = fig.add_subplot(gs[0, 0:4])
axpxy = fig.add_subplot(gs[1, 0:4], sharex=axrxy)
axtr = fig.add_subplot(gs[0, 4:], sharex=axrxy)
axti = fig.add_subplot(gs[1, 4:], sharex=axrxy)
if self.plot_z == False:
# plot resistivity
erxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
rp.resxy[nzxy],
rp.resxy_err[nzxy],
**kw_xx)
eryx = mtplottools.plot_errorbar(axrxy,
period[nzyx],
rp.resyx[nzyx],
rp.resyx_err[nzyx],
**kw_yy)
# plot phase
erxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
rp.phasexy[nzxy],
rp.phasexy_err[nzxy],
**kw_xx)
eryx = mtplottools.plot_errorbar(axpxy,
period[nzyx],
rp.phaseyx[nzyx],
rp.phaseyx_err[nzyx],
**kw_yy)
elif self.plot_z == True:
# plot real
erxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
abs(z_obj.z[
nzxy, 0, 1].real),
abs(z_obj.z_err[
nzxy, 0, 1].real),
**kw_xx)
eryx = mtplottools.plot_errorbar(axrxy,
period[nzxy],
abs(z_obj.z[
nzxy, 1, 0].real),
abs(z_obj.z_err[
nzxy, 1, 0].real),
**kw_yy)
# plot phase
erxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
abs(z_obj.z[
nzxy, 0, 1].imag),
abs(z_obj.z_err[
nzxy, 0, 1].real),
**kw_xx)
eryx = mtplottools.plot_errorbar(axpxy,
period[nzyx],
abs(z_obj.z[
nzyx, 1, 0].imag),
abs(z_obj.z_err[
nzyx, 1, 0].real),
**kw_yy)
# plot tipper
if plot_tipper == True:
ertx = mtplottools.plot_errorbar(axtr,
period,
t_obj.tipper[
ntx, 0, 0].real,
t_obj.tipper_err[
ntx, 0, 0],
**kw_xx)
erty = mtplottools.plot_errorbar(axtr,
period,
t_obj.tipper[
nty, 0, 1].real,
t_obj.tipper_err[
nty, 0, 1],
**kw_yy)
ertx = mtplottools.plot_errorbar(axti,
period,
t_obj.tipper[
ntx, 0, 0].imag,
t_obj.tipper_err[
ntx, 0, 0],
**kw_xx)
erty = mtplottools.plot_errorbar(axti,
period,
t_obj.tipper[
nty, 0, 1].imag,
t_obj.tipper_err[
nty, 0, 1],
**kw_yy)
if plot_tipper == False:
self.ax_list = [axrxy, axpxy]
line_list = [erxy[0], eryx[0]]
label_list = ['$Z_{xy}$', '$Z_{yx}$']
else:
                        self.ax_list = [axrxy, axpxy, axtr, axti]
line_list = [[erxy[0], eryx[0]],
[ertx[0], erty[0]]]
label_list = [['$Z_{xy}$', '$Z_{yx}$'],
['$T_{x}$', '$T_{y}$']]
elif self.plot_component == 4:
if plot_tipper == False:
axrxy = fig.add_subplot(gs[0, 0:2])
axpxy = fig.add_subplot(gs[1, 0:2], sharex=axrxy)
axrxx = fig.add_subplot(gs[0, 2:], sharex=axrxy)
axpxx = fig.add_subplot(gs[1, 2:], sharex=axrxy)
else:
# axrxy = fig.add_subplot(gs[0, 0:2])
# axpxy = fig.add_subplot(gs[1, 0:2], sharex=axrxy)
#
#
# axrxx = fig.add_subplot(gs[0, 2:4], sharex=axrxy)
# axpxx = fig.add_subplot(gs[1, 2:4], sharex=axrxy)
#
# axtr = fig.add_subplot(gs[0, 4:], sharex=axrxy)
# axti = fig.add_subplot(gs[1, 4:], sharex=axrxy)
axrxy = fig.add_subplot(gs[0, 0])
axpxy = fig.add_subplot(gs[1, 0], sharex=axrxy)
axrxx = fig.add_subplot(gs[0, 1], sharex=axrxy)
axpxx = fig.add_subplot(gs[1, 1], sharex=axrxy)
axtr = fig.add_subplot(gs[2, 0], sharex=axrxy)
axti = fig.add_subplot(gs[2, 1], sharex=axrxy)
if self.plot_z == False:
# plot resistivity
erxx = mtplottools.plot_errorbar(axrxx,
period[nzxx],
rp.resxx[nzxx],
rp.resxx_err[nzxx],
**kw_xx)
erxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
rp.resxy[nzxy],
rp.resxy_err[nzxy],
**kw_xx)
eryx = mtplottools.plot_errorbar(axrxy,
period[nzyx],
rp.resyx[nzyx],
rp.resyx_err[nzyx],
**kw_yy)
eryy = mtplottools.plot_errorbar(axrxx,
period[nzyy],
rp.resyy[nzyy],
rp.resyy_err[nzyy],
**kw_yy)
# plot phase
erxx = mtplottools.plot_errorbar(axpxx,
period[nzxx],
rp.phasexx[nzxx],
rp.phasexx_err[nzxx],
**kw_xx)
erxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
rp.phasexy[nzxy],
rp.phasexy_err[nzxy],
**kw_xx)
eryx = mtplottools.plot_errorbar(axpxy,
period[nzyx],
rp.phaseyx[nzyx],
rp.phaseyx_err[nzyx],
**kw_yy)
eryy = mtplottools.plot_errorbar(axpxx,
period[nzyy],
rp.phaseyy[nzyy],
rp.phaseyy_err[nzyy],
**kw_yy)
elif self.plot_z == True:
# plot real
erxx = mtplottools.plot_errorbar(axrxx,
period[nzxx],
abs(z_obj.z[
nzxx, 0, 0].real),
abs(z_obj.z_err[
nzxx, 0, 0].real),
**kw_xx)
erxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
abs(z_obj.z[
nzxy, 0, 1].real),
abs(z_obj.z_err[
nzxy, 0, 1].real),
**kw_xx)
eryx = mtplottools.plot_errorbar(axrxy,
period[nzyx],
abs(z_obj.z[
nzyx, 1, 0].real),
abs(z_obj.z_err[
nzyx, 1, 0].real),
**kw_yy)
eryy = mtplottools.plot_errorbar(axrxx,
period[nzyy],
abs(z_obj.z[
nzyy, 1, 1].real),
abs(z_obj.z_err[
nzyy, 1, 1].real),
**kw_yy)
# plot phase
erxx = mtplottools.plot_errorbar(axpxx,
period[nzxx],
abs(z_obj.z[
nzxx, 0, 0].imag),
abs(z_obj.z_err[
nzxx, 0, 0].real),
**kw_xx)
erxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
abs(z_obj.z[
nzxy, 0, 1].imag),
abs(z_obj.z_err[
nzxy, 0, 1].real),
**kw_xx)
eryx = mtplottools.plot_errorbar(axpxy,
period[nzyx],
abs(z_obj.z[
nzyx, 1, 0].imag),
abs(z_obj.z_err[
nzyx, 1, 0].real),
**kw_yy)
eryy = mtplottools.plot_errorbar(axpxx,
period[nzyy],
abs(z_obj.z[
nzyy, 1, 1].imag),
abs(z_obj.z_err[
nzyy, 1, 1].real),
**kw_yy)
# plot tipper
if plot_tipper == True:
ertx = mtplottools.plot_errorbar(axtr,
period[ntx],
t_obj.tipper[
ntx, 0, 0].real,
t_obj.tipper_err[
ntx, 0, 0],
**kw_xx)
erty = mtplottools.plot_errorbar(axtr,
period[nty],
t_obj.tipper[
nty, 0, 1].real,
t_obj.tipper_err[
nty, 0, 1],
**kw_yy)
ertx = mtplottools.plot_errorbar(axti,
period[ntx],
t_obj.tipper[
ntx, 0, 0].imag,
t_obj.tipper_err[
ntx, 0, 0],
**kw_xx)
erty = mtplottools.plot_errorbar(axti,
period[nty],
t_obj.tipper[
nty, 0, 1].imag,
t_obj.tipper_err[
nty, 0, 1],
**kw_yy)
if plot_tipper == False:
self.ax_list = [axrxy, axrxx, axpxy, axpxx]
line_list = [[erxy[0], eryx[0]], [erxx[0], eryy[0]]]
label_list = [['$Z_{xy}$', '$Z_{yx}$'],
['$Z_{xx}$', '$Z_{yy}$']]
else:
self.ax_list = [axrxy, axrxx, axpxy, axpxx, axtr, axti]
                        line_list = [[erxy[0], eryx[0]], [erxx[0], eryy[0]],
                                     [ertx[0], erty[0]]]
label_list = [['$Z_{xy}$', '$Z_{yx}$'],
['$Z_{xx}$', '$Z_{yy}$'],
['$T_x$', '$T_y$']]
# set axis properties
for aa, ax in enumerate(self.ax_list):
ax.tick_params(axis='y', pad=self.ylabel_pad)
# ylabels = ax.get_yticks().tolist()
# ylabels[-1] = ''
# ylabels[0] = ''
# ax.set_yticklabels(ylabels)
if len(self.ax_list) == 2:
ax.set_xlabel('Period (s)', fontdict=fontdict)
if self.plot_z == True:
ax.set_yscale('log', nonposy='clip')
ylim = ax.get_ylim()
ylimits = (10 ** np.floor(np.log10(ylim[0])),
10 ** np.ceil(np.log10(ylim[1])))
ax.set_ylim(ylimits)
ylabels = [' '] + \
[mtplottools.labeldict[ii] for ii
in np.arange(np.log10(ylimits[0]),
np.log10(ylimits[1]), 1)] + \
[' ']
ax.set_yticklabels(ylabels)
if aa == 0:
plt.setp(ax.get_xticklabels(), visible=False)
if self.plot_z == False:
ax.set_yscale('log', nonposy='clip')
ax.set_ylabel('App. Res. ($\mathbf{\Omega \cdot m}$)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('|Re[Z (mV/km nT)]|',
fontdict=fontdict)
if self.res_limits is not None:
ax.set_ylim(self.res_limits)
else:
ax.set_ylim(self.phase_limits)
if self.plot_z == False:
ax.set_ylabel('Phase (deg)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('|Im[Z (mV/km nT)]|',
fontdict=fontdict)
elif len(self.ax_list) == 4 and plot_tipper == False:
if self.plot_z == True:
ax.set_yscale('log', nonposy='clip')
if aa < 2:
plt.setp(ax.get_xticklabels(), visible=False)
if self.plot_z == False:
ax.set_yscale('log', nonposy='clip')
if self.res_limits is not None:
ax.set_ylim(self.res_limits)
else:
if self.plot_z == False:
ax.set_ylim(self.phase_limits)
ax.set_xlabel('Period (s)', fontdict=fontdict)
if aa == 0:
if self.plot_z == False:
ax.set_ylabel('App. Res. ($\mathbf{\Omega \cdot m}$)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Re[Z (mV/km nT)]',
fontdict=fontdict)
elif aa == 1:
if self.plot_z == False:
ax.set_ylabel('Phase (deg)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Im[Z (mV/km nT)]',
fontdict=fontdict)
elif len(self.ax_list) == 4 and plot_tipper == True:
if aa == 0 or aa == 2:
plt.setp(ax.get_xticklabels(), visible=False)
if self.plot_z == False:
ax.set_yscale('log', nonposy='clip')
if self.res_limits is not None:
ax.set_ylim(self.res_limits)
else:
ax.set_ylim(self.phase_limits)
ax.set_xlabel('Period (s)', fontdict=fontdict)
if aa == 0:
if self.plot_z == False:
ax.set_ylabel('App. Res. ($\mathbf{\Omega \cdot m}$)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Re[Z (mV/km nT)]',
fontdict=fontdict)
elif aa == 1:
if self.plot_z == False:
ax.set_ylabel('Phase (deg)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Im[Z (mV/km nT)]',
fontdict=fontdict)
elif len(self.ax_list) == 6 and plot_tipper == True:
if aa <= 2: # Changes applied
# plt.setp(ax.get_xticklabels(), visible=False)
if self.plot_z == False:
if aa == 0 or aa == 1:
ax.set_yscale('log', nonposy='clip')
ylim = ax.get_ylim()
ylimits = (10 ** (np.floor(np.log10(ylim[0]))),
10 ** (np.ceil(np.log10(ylim[1]))))
ax.set_ylim(ylimits)
if self.res_limits is not None:
ax.set_ylim(self.res_limits)
else:
ax.set_ylim(self.phase_limits)
ax.set_xlabel('Period (s)', fontdict=fontdict)
if aa == 0:
if self.plot_z == False:
ax.set_ylabel('App. Res . ($\mathbf{\Omega \cdot m}$)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Re[Z (mV/km nT)]',
fontdict=fontdict)
elif aa == 2:
if self.plot_z == False:
ax.set_ylabel('Phase (deg)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Im[Z (mV/km nT)]',
fontdict=fontdict)
if aa <= 2: # Setting the decimal places
ax.yaxis.set_major_formatter(
FormatStrFormatter('%.0f'))
pass
if self.plot_z == True:
ax.set_yscale('log', nonposy='clip')
# else:
# plt.setp(ax.yaxis.get_ticklabels(), visible=False)
if aa == 4:
if self.plot_z == False:
ax.set_ylabel('Tipper',
fontdict=fontdict)
# writing x axis ticks and making it visible
if aa == 4 or aa == 5:
plt.setp(ax.get_xticklabels(), visible=True)
else:
plt.setp(ax.get_xticklabels(), visible=False)
ax.set_xscale('log', nonposx='clip')
ax.set_xlim(xmin=10 ** (np.floor(np.log10(period[0]))) * 1.01,
xmax=10 ** (np.ceil(np.log10(period[-1]))) * .99)
ax.grid(True, alpha=.25)
if plotr == True:
for rr in range(nr):
if self.color_mode == 'color':
cxy = (0, .4 + float(rr) / (3 * nr), 0)
cyx = (.7 + float(rr) / (4 * nr), .13, .63 -
float(rr) / (4 * nr))
elif self.color_mode == 'bw':
cxy = tuple(3 * [1 - .5 / (rr + 1)])
cyx = tuple(3 * [1 - .5 / (rr + 1)])
resp_z_obj = self.resp_object[rr].mt_dict[station].Z
resp_z_err = np.nan_to_num(
(z_obj.z - resp_z_obj.z) / z_obj.z_err)
resp_t_obj = self.resp_object[rr].mt_dict[station].Tipper
resp_t_err = np.nan_to_num((t_obj.tipper - resp_t_obj.tipper) /
t_obj.tipper_err)
rrp = mtplottools.ResPhase(resp_z_obj)
rms = resp_z_err.std()
rms_xx = resp_z_err[:, 0, 0].std()
rms_xy = resp_z_err[:, 0, 1].std()
rms_yx = resp_z_err[:, 1, 0].std()
rms_yy = resp_z_err[:, 1, 1].std()
rms_tx = resp_t_err[:, 0, 0].std()
rms_ty = resp_t_err[:, 0, 1].std()
print ' --- response {0} ---'.format(rr)
print ' RMS = {:.2f}'.format(rms)
print ' RMS_xx = {:.2f}'.format(rms_xx)
print ' RMS_xy = {:.2f}'.format(rms_xy)
print ' RMS_yx = {:.2f}'.format(rms_yx)
print ' RMS_yy = {:.2f}'.format(rms_yy)
print ' RMS_Tx = {:.2f}'.format(rms_tx)
print ' RMS_Ty = {:.2f}'.format(rms_ty)
# --> make key word dictionaries for plotting
kw_xx = {'color': self.ctem,#cxy,
'marker': self.mtem,
'ms': self.ms,
'ls': self.ls,
'lw': self.lw,
'e_capsize': self.e_capsize,
'e_capthick': self.e_capthick}
kw_yy = {'color': self.ctmm,#cyx,
'marker': self.mtmm,
'ms': self.ms,
'ls': self.ls,
'lw': self.lw,
'e_capsize': self.e_capsize,
'e_capthick': self.e_capthick}
if self.plot_style == 1:
if self.plot_component == 2:
if self.plot_z == False:
# plot resistivity
rerxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
rrp.resxy[
nzxy],
**kw_xx)
reryx = mtplottools.plot_errorbar(axryx,
period[nzyx],
rrp.resyx[
nzyx],
**kw_yy)
# plot phase
rerxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
rrp.phasexy[
nzxy],
**kw_xx)
reryx = mtplottools.plot_errorbar(axpyx,
period[nzyx],
rrp.phaseyx[
nzyx],
**kw_yy)
elif self.plot_z == True:
# plot real
rerxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
abs(resp_z_obj.z[
nzxy, 0, 1].real),
**kw_xx)
reryx = mtplottools.plot_errorbar(axryx,
period[nzyx],
abs(resp_z_obj.z[
nzyx, 1, 0].real),
**kw_yy)
# plot phase
rerxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
abs(resp_z_obj.z[
nzxy, 0, 1].imag),
**kw_xx)
reryx = mtplottools.plot_errorbar(axpyx,
period[nzyx],
abs(resp_z_obj.z[
nzyx, 1, 0].imag),
**kw_yy)
if plot_tipper == True:
rertx = mtplottools.plot_errorbar(axtr,
period[ntx],
resp_t_obj.tipper[
ntx, 0, 0].real,
**kw_xx)
rerty = mtplottools.plot_errorbar(axtr,
period[nty],
resp_t_obj.tipper[
nty, 0, 1].real,
**kw_yy)
rertx = mtplottools.plot_errorbar(axti,
period[ntx],
resp_t_obj.tipper[
ntx, 0, 0].imag,
**kw_xx)
rerty = mtplottools.plot_errorbar(axti,
period[nty],
resp_t_obj.tipper[
nty, 0, 1].imag,
**kw_yy)
if plot_tipper == False:
line_list[0] += [rerxy[0]]
line_list[1] += [reryx[0]]
label_list[0] += ['$Z^m_{xy}$ ' +
'rms={0:.2f}'.format(rms_xy)]
label_list[1] += ['$Z^m_{yx}$ ' +
'rms={0:.2f}'.format(rms_yx)]
else:
line_list[0] += [rerxy[0]]
line_list[1] += [reryx[0]]
line_list[2] += [rertx[0], rerty[0]]
label_list[0] += ['$Z^m_{xy}$ ' +
'rms={0:.2f}'.format(rms_xy)]
label_list[1] += ['$Z^m_{yx}$ ' +
'rms={0:.2f}'.format(rms_yx)]
label_list[2] += ['$T^m_{x}$' +
'rms={0:.2f}'.format(rms_tx),
'$T^m_{y}$' +
'rms={0:.2f}'.format(rms_ty)]
elif self.plot_component == 4:
if self.plot_z == False:
# plot resistivity
rerxx = mtplottools.plot_errorbar(axrxx,
period[nzxx],
rrp.resxx[
nzxx],
**kw_xx)
rerxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
rrp.resxy[
nzxy],
**kw_xx)
reryx = mtplottools.plot_errorbar(axryx,
period[nzyx],
rrp.resyx[
nzyx],
**kw_yy)
reryy = mtplottools.plot_errorbar(axryy,
period[nzyy],
rrp.resyy[
nzyy],
**kw_yy)
# plot phase
rerxx = mtplottools.plot_errorbar(axpxx,
period[nzxx],
rrp.phasexx[
nzxx],
**kw_xx)
rerxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
rrp.phasexy[
nzxy],
**kw_xx)
reryx = mtplottools.plot_errorbar(axpyx,
period[nzyx],
rrp.phaseyx[
nzyx],
**kw_yy)
reryy = mtplottools.plot_errorbar(axpyy,
period[nzyy],
rrp.phaseyy[
nzyy],
**kw_yy)
elif self.plot_z == True:
# plot real
rerxx = mtplottools.plot_errorbar(axrxx,
period[nzxx],
abs(resp_z_obj.z[
nzxx, 0, 0].real),
**kw_xx)
rerxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
abs(resp_z_obj.z[
nzxy, 0, 1].real),
**kw_xx)
reryx = mtplottools.plot_errorbar(axryx,
period[nzyx],
abs(resp_z_obj.z[
nzyx, 1, 0].real),
**kw_yy)
reryy = mtplottools.plot_errorbar(axryy,
period[nzyy],
abs(resp_z_obj.z[
nzyy, 1, 1].real),
**kw_yy)
# plot phase
rerxx = mtplottools.plot_errorbar(axpxx,
period[nzxx],
abs(resp_z_obj.z[
nzxx, 0, 0].imag),
**kw_xx)
rerxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
abs(resp_z_obj.z[
nzxy, 0, 1].imag),
**kw_xx)
reryx = mtplottools.plot_errorbar(axpyx,
period[nzyx],
abs(resp_z_obj.z[
nzyx, 1, 0].imag),
**kw_yy)
reryy = mtplottools.plot_errorbar(axpyy,
period[nzyy],
abs(resp_z_obj.z[
nzyy, 1, 1].imag),
**kw_yy)
if plot_tipper == True:
rertx = mtplottools.plot_errorbar(axtxr,
period[ntx],
resp_t_obj.tipper[
ntx, 0, 0].real,
**kw_xx)
rerty = mtplottools.plot_errorbar(axtyr,
period[nty],
resp_t_obj.tipper[
nty, 0, 1].real,
**kw_yy)
rertx = mtplottools.plot_errorbar(axtxi,
period[ntx],
resp_t_obj.tipper[
ntx, 0, 0].imag,
**kw_xx)
rerty = mtplottools.plot_errorbar(axtyi,
period[nty],
resp_t_obj.tipper[
nty, 0, 1].imag,
**kw_yy)
if plot_tipper == False:
line_list[0] += [rerxx[0]]
line_list[1] += [rerxy[0]]
line_list[2] += [reryx[0]]
line_list[3] += [reryy[0]]
label_list[0] += ['$Z^m_{xx}$ ' +
'rms={0:.2f}'.format(rms_xx)]
label_list[1] += ['$Z^m_{xy}$ ' +
'rms={0:.2f}'.format(rms_xy)]
label_list[2] += ['$Z^m_{yx}$ ' +
'rms={0:.2f}'.format(rms_yx)]
label_list[3] += ['$Z^m_{yy}$ ' +
'rms={0:.2f}'.format(rms_yy)]
else:
line_list[0] += [rerxx[0]]
line_list[1] += [rerxy[0]]
line_list[2] += [reryx[0]]
line_list[3] += [reryy[0]]
line_list[4] += [rertx[0]]
line_list[5] += [rerty[0]]
label_list[0] += ['$Z^m_{xx}$ ' +
'rms={0:.2f}'.format(rms_xx)]
label_list[1] += ['$Z^m_{xy}$ ' +
'rms={0:.2f}'.format(rms_xy)]
label_list[2] += ['$Z^m_{yx}$ ' +
'rms={0:.2f}'.format(rms_yx)]
label_list[3] += ['$Z^m_{yy}$ ' +
'rms={0:.2f}'.format(rms_yy)]
label_list[4] += ['$T^m_{x}$' +
'rms={0:.2f}'.format(rms_tx)]
label_list[5] += ['$T^m_{y}$' +
'rms={0:.2f}'.format(rms_ty)]
elif self.plot_style == 2:
if self.plot_component == 2:
if self.plot_z == False:
# plot resistivity
rerxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
rrp.resxy[
nzxy],
**kw_xx)
reryx = mtplottools.plot_errorbar(axrxy,
period[nzyx],
rrp.resyx[
nzyx],
**kw_yy)
# plot phase
rerxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
rrp.phasexy[
nzxy],
**kw_xx)
reryx = mtplottools.plot_errorbar(axpxy,
period[nzyx],
rrp.phaseyx[
nzyx],
**kw_yy)
elif self.plot_z == True:
# plot real
rerxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
abs(resp_z_obj.z[
nzxy, 0, 1].real),
**kw_xx)
reryx = mtplottools.plot_errorbar(axrxy,
period[nzyx],
abs(resp_z_obj.z[
nzyx, 1, 0].real),
**kw_yy)
# plot phase
rerxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
abs(resp_z_obj.z[
nzxy, 0, 1].imag),
**kw_xx)
reryx = mtplottools.plot_errorbar(axpxy,
period[nzyx],
abs(resp_z_obj.z[
nzyx, 1, 0].imag),
**kw_xx)
if plot_tipper == True:
rertx = mtplottools.plot_errorbar(axtr,
period[ntx],
resp_t_obj.tipper[
ntx, 0, 0].real,
**kw_xx)
rerty = mtplottools.plot_errorbar(axtr,
period[nty],
resp_t_obj.tipper[
nty, 0, 1].real,
**kw_yy)
rertx = mtplottools.plot_errorbar(axti,
period[ntx],
resp_t_obj.tipper[
ntx, 0, 0].imag,
**kw_xx)
rerty = mtplottools.plot_errorbar(axti,
period[nty],
resp_t_obj.tipper[
nty, 0, 1].imag,
**kw_yy)
if plot_tipper == False:
line_list += [rerxy[0], reryx[0]]
label_list += ['$Z^m_{xy}$ ' +
'rms={0:.2f}'.format(rms_xy),
'$Z^m_{yx}$ ' +
'rms={0:.2f}'.format(rms_yx)]
else:
line_list[0] += [rerxy[0], reryx[0]]
line_list[1] += [rertx[0], rerty[0]]
label_list[0] += ['$Z^m_{xy}$ ' +
'rms={0:.2f}'.format(rms_xy),
'$Z^m_{yx}$ ' +
'rms={0:.2f}'.format(rms_yx)]
label_list[1] += ['$T^m_{x}$' +
'rms={0:.2f}'.format(rms_tx),
'$T^m_{y}$' +
'rms={0:.2f}'.format(rms_ty)]
elif self.plot_component == 4:
if self.plot_z == False:
# plot resistivity
rerxx = mtplottools.plot_errorbar(axrxx,
period[nzxx],
rrp.resxx[
nzxx],
**kw_xx)
rerxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
rrp.resxy[
nzxy],
**kw_xx)
reryx = mtplottools.plot_errorbar(axrxy,
period[nzyx],
rrp.resyx[
nzyx],
**kw_yy)
reryy = mtplottools.plot_errorbar(axrxx,
period[nzyy],
rrp.resyy[
nzyy],
**kw_yy)
# plot phase
rerxx = mtplottools.plot_errorbar(axpxx,
period[nzxx],
rrp.phasexx[
nzxx],
**kw_xx)
rerxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
rrp.phasexy[
nzxy],
**kw_xx)
reryx = mtplottools.plot_errorbar(axpxy,
period[nzyx],
rrp.phaseyx[
nzyx],
**kw_yy)
reryy = mtplottools.plot_errorbar(axpxx,
period[nzyy],
rrp.phaseyy[
nzyy],
**kw_yy)
elif self.plot_z == True:
# plot real
rerxx = mtplottools.plot_errorbar(axrxx,
period[nzxx],
abs(resp_z_obj.z[
nzxx, 0, 0].real),
**kw_xx)
rerxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
abs(resp_z_obj.z[
nzxy, 0, 1].real),
**kw_xx)
reryx = mtplottools.plot_errorbar(axrxy,
period[nzyx],
abs(resp_z_obj.z[
nzyx, 1, 0].real),
**kw_yy)
reryy = mtplottools.plot_errorbar(axrxx,
period[nzyy],
abs(resp_z_obj.z[
nzyy, 1, 1].real),
**kw_yy)
# plot phase
rerxx = mtplottools.plot_errorbar(axpxx,
period[nzxx],
abs(resp_z_obj.z[
nzxx, 0, 0].imag),
**kw_xx)
rerxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
abs(resp_z_obj.z[
nzxy, 0, 1].imag),
**kw_xx)
reryx = mtplottools.plot_errorbar(axpxy,
period[nzyx],
abs(resp_z_obj.z[
nzyx, 1, 0].imag),
**kw_yy)
reryy = mtplottools.plot_errorbar(axpxx,
period[nzyy],
abs(resp_z_obj.z[
nzyy, 1, 1].imag),
**kw_yy)
if plot_tipper == True:
rertx = mtplottools.plot_errorbar(axtr,
period[ntx],
resp_t_obj.tipper[
ntx, 0, 0].real,
**kw_xx)
rerty = mtplottools.plot_errorbar(axtr,
period[nty],
resp_t_obj.tipper[
nty, 0, 1].real,
**kw_yy)
rertx = mtplottools.plot_errorbar(axti,
period[ntx],
resp_t_obj.tipper[
ntx, 0, 0].imag,
**kw_xx)
rerty = mtplottools.plot_errorbar(axti,
period[nty],
resp_t_obj.tipper[
nty, 0, 1].imag,
**kw_yy)
if plot_tipper == False:
line_list[0] += [rerxy[0], reryx[0]]
line_list[1] += [rerxx[0], reryy[0]]
label_list[0] += ['$Z^m_{xy}$ ' +
'rms={0:.2f}'.format(rms_xy),
'$Z^m_{yx}$ ' +
'rms={0:.2f}'.format(rms_yx)]
label_list[1] += ['$Z^m_{xx}$ ' +
'rms={0:.2f}'.format(rms_xx),
'$Z^m_{yy}$ ' +
'rms={0:.2f}'.format(rms_yy)]
else:
line_list[0] += [rerxy[0], reryx[0]]
line_list[1] += [rerxx[0], reryy[0]]
line_list[2] += [rertx[0], rerty[0]]
label_list[0] += ['$Z^m_{xy}$ ' +
'rms={0:.2f}'.format(rms_xy),
'$Z^m_{yx}$ ' +
'rms={0:.2f}'.format(rms_yx)]
label_list[1] += ['$Z^m_{xx}$ ' +
'rms={0:.2f}'.format(rms_xx),
'$Z^m_{yy}$ ' +
'rms={0:.2f}'.format(rms_yy)]
label_list[2] += ['$T^m_{x}$' +
'rms={0:.2f}'.format(rms_tx),
'$T^m_{y}$' +
'rms={0:.2f}'.format(rms_ty)]
# make legends
if self.plot_style == 1:
legend_ax_list = self.ax_list[0:self.plot_component]
if plot_tipper == True:
if self.plot_component == 2:
legend_ax_list.append(self.ax_list[4])
elif self.plot_component == 4:
legend_ax_list.append(self.ax_list[8])
legend_ax_list.append(self.ax_list[10])
for aa, ax in enumerate(legend_ax_list):
ax.legend(line_list[aa],
label_list[aa],
loc=self.legend_loc,
bbox_to_anchor=self.legend_pos,
markerscale=self.legend_marker_scale,
borderaxespad=self.legend_border_axes_pad,
labelspacing=self.legend_label_spacing,
handletextpad=self.legend_handle_text_pad,
borderpad=self.legend_border_pad,
prop={'size': max([self.font_size / (nr + 1), 5])})
if self.plot_style == 2:
if self.plot_component == 2:
legend_ax_list = [self.ax_list[0]]
if plot_tipper == True:
legend_ax_list.append(self.ax_list[2])
for aa, ax in enumerate(legend_ax_list):
ax.legend(line_list[aa],
label_list[aa],
loc=self.legend_loc,
bbox_to_anchor=self.legend_pos,
markerscale=self.legend_marker_scale,
borderaxespad=self.legend_border_axes_pad,
labelspacing=self.legend_label_spacing,
handletextpad=self.legend_handle_text_pad,
borderpad=self.legend_border_pad,
prop={'size': max([self.font_size / (nr + 1), 5])})
else:
legend_ax_list = self.ax_list[0:self.plot_component / 2]
if plot_tipper == True:
if self.plot_component == 2:
legend_ax_list.append(self.ax_list[2])
elif self.plot_component == 4:
legend_ax_list.append(self.ax_list[4])
for aa, ax in enumerate(legend_ax_list):
ax.legend(line_list[aa],
label_list[aa],
loc=self.legend_loc,
bbox_to_anchor=self.legend_pos,
markerscale=self.legend_marker_scale,
borderaxespad=self.legend_border_axes_pad,
labelspacing=self.legend_label_spacing,
handletextpad=self.legend_handle_text_pad,
borderpad=self.legend_border_pad,
prop={'size': max([self.font_size / (nr + 1), 5])})
if save2file is not None:
#plt.savefig(save2file)
self._save_figure(save2file)
else:
pass
plt.show() # --> BE SURE TO SHOW THE PLOT
        # the figure needs to be closed (X); then the following code saves it
        # to a file.
# if save2file is not None:
# # fig.savefig(save2file, dpi=self.fig_dpi, bbox_inches='tight')
# #figfile = self.save_figure0(save2file)
# plt.savefig(save2file)
#
#
# return save2file
def redraw_plot(self):
"""
redraw plot if parameters were changed
use this function if you updated some attributes and want to re-plot.
:Example: ::
>>> # change the color and marker of the xy components
>>> import mtpy.modeling.occam2d as occam2d
>>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
>>> p1 = ocd.plotAllResponses()
>>> #change line width
>>> p1.lw = 2
>>> p1.redraw_plot()
"""
for fig in self.fig_list:
plt.close(fig)
self.plot()
def _save_figure(self, save_fn, orientation='portrait',
fig_dpi=200, close_fig='n'):
"""
Internal function to save the plotted figure to a file: save_fn.
        The file format is determined automatically from the ``save_fn`` suffix: [ pdf | eps | jpg | png | svg ].
Arguments:
-----------
**save_fn** : string
full path to save figure to, can be input as
* directory path -> the directory path to save to
in which the file will be saved as
save_fn/station_name_PhaseTensor.file_format
* full path -> file will be save to the given
path. If you use this option then the format
will be assumed to be provided by the path
**file_format** : [ pdf | eps | jpg | png | svg ]
file type of saved figure pdf,svg,eps...
**orientation** : [ landscape | portrait ]
orientation in which the file will be saved
*default* is portrait
**fig_dpi** : int
The resolution in dots-per-inch the file will be
saved. If None then the dpi will be that at
which the figure was made. I don't think that
it can be larger than dpi of the figure.
        **close_fig** : [ y | n ]
* 'y' will close the plot after saving.
* 'n' will leave plot open
"""
#plt.savefig(save_fn, dpi=fig_dpi, format=file_format, orientation=orientation, bbox_inches='tight')
plt.savefig(save_fn, dpi=fig_dpi, orientation=orientation, bbox_inches='tight')
if close_fig == 'y':
plt.clf()
plt.close()
else:
pass
print ('Saved figure to: ' + save_fn)
return save_fn
def update_plot(self):
"""
        update any parameters that were changed using the built-in draw from
        canvas.
        Use this if you change any of the .fig or axes properties
:Example: ::
>>> # to change the grid lines to only be on the major ticks
>>> import mtpy.modeling.occam2d as occam2d
>>> dfn = r"/home/occam2d/Inv1/data.dat"
>>> ocd = occam2d.Occam2DData(dfn)
>>> ps1 = ocd.plotAllResponses()
>>> [ax.grid(True, which='major') for ax in [ps1.axrte,ps1.axtep]]
>>> ps1.update_plot()
"""
self.fig.canvas.draw()
def __str__(self):
"""
rewrite the string builtin to give a useful message
"""
return ("Plots data vs model response computed by WS3DINV")
# ==================================================================================
# FZ: add example usage code
# Just do: python mtpy/imaging/plot_response.py
# ==================================================================================
# if __name__ == "__main__old":
#
# from mtpy.mtpy_globals import *
#
# # directory where files are located
# # wd = os.path.join(SAMPLE_DIR, 'ModEM')
# wd = os.path.join(SAMPLE_DIR, 'ModEM_2')
#
# # file stem for inversion result
# filestem = 'Modular_MPI_NLCG_004'
#
# datafn = 'ModEM_Data.dat'
#
# # station = 'pb23'
# station = 'Synth02'
# plot_z = False
#
# ro = PlotResponse(data_fn=os.path.join(wd, datafn),
# resp_fn=os.path.join(wd, filestem + '.dat'),
# plot_type=[station],
# plot_style=2,
# plot_z=plot_z)
# ro.plot()
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.option('-d','--directory',type=str,default=r'examples/model_files/ModEM_2',help='directory for data files')
@click.option('-s','--stem_data_file',type=str,default='Modular_MPI_NLCG_004.dat', help='file stem')
@click.option('-i','--input_data_file',type=str,default='ModEM_Data.dat', help='Data File')
@click.option('-c','--collection_station',type=str,default='Synth02', help='Data Collection station')
@click.option('-p','--plot_z',type=bool,default=False, help=
              '[True | False] Plot True for impedance, False for resistivity and phase')
@click.option('-f','--font_size',type=int,default=2, help='Plot text font size')
def merge_plotting(directory, stem_data_file, input_data_file, collection_station, plot_z, font_size):
print("============================================================================")
print("")
print("Following are the examples for running plot_response : ")
print("")
print("python mtpy/imaging/plot_response.py [--help | -h ]")
print("python mtpy/imaging/plot_response.py")
print("python mtpy/imaging/plot_response.py -d examples\data\ModeEM_files_Test " +
"-s Modular_MPI_NLCG_094.dat -i ModEM_Data.dat -c GB09 -p False -f 3")
print("python mtpy/imaging/plot_response.py -d examples\data\ModeEM_files_Test -p False ( Changing Plot types ) ")
print("")
print("============================================================================")
ro = PlotResponse(data_fn=os.path.join(directory, input_data_file),
resp_fn=os.path.join(directory, stem_data_file),
plot_type=[collection_station],
plot_style=2,
plot_z=plot_z,
font_size=font_size)
ro.plot()
if __name__ == "__main__":
from mtpy.mtpy_globals import *
merge_plotting()
|
MTgeophysics/mtpy
|
legacy/plot_response.py
|
Python
|
gpl-3.0
| 114,878
|
[
"VTK"
] |
e4e26504fb5b3c69bf2d6c76fc86102d10ab5e4ff80696edf8ba92d7d702324f
|
from __future__ import division
import warnings
import os, sys
import numpy as np
import scipy.spatial
import scipy.weave
import scipy.stats.kde
import matplotlib.pyplot as pp
import bisect
# apparently scipy.weave is deprecated. I really shouldn't have used it.
# the best thing would probably be to port entropy_nn() -- or at least
# the subroutine that's in weave -- to cython. The other methods
# are basically pure numpy, so they're not going to achieve any speedup
# in cython.
EULER_MASCHERONI = 0.57721566490153286060651209008240243104215933593992
"""
Author:
https://github.com/rmcgibbo/information/blob/master/entropy.py
Three estimators for the entropy of continuous random variables (one dimension)
entropy_bin() uses the common histogram approach. In addition to the data, it
requires specifying a bin width.
entropy_ci() uses the first order correlation integral, which is like a naive
kernel density estimator. In addition to the data, it requires specifying a
neighborhood radius (kernel bandwidth), which is analogous to a (half) bin width
for the histogram estimator.
entropy_nn uses the distribution of nearest neighbor distances. It requires no
adjustable parameters.
Using some simulations with various bandwidths, my experience is that the
nearest neighbor estimator has the lowest bias, but the highest variance. The
correlation integral estimator is probably the best, especially with a well
chosen neighbor radius. The histogram method tends to underestimate the entropy.
I suspect a kernel density estimator using a Gaussian kernel would be even better,
but that is not implemented. The entropy_ci() estimator uses basically a square
kernel.
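Illustrative usage (an editorial sketch, not from the original author; note that
entropy_bin() reports bits -- it uses log2 -- while the other two report nats):

    >>> import numpy as np
    >>> x = np.random.randn(100000)
    >>> # analytic entropy of N(0, 1) is 0.5*ln(2*pi*e), about 1.42 nats
    >>> entropy_nn(x)               # doctest: +SKIP
    >>> entropy_ci(x, radius=0.1)   # doctest: +SKIP
    >>> entropy_bin(x, width=0.1)   # doctest: +SKIP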
"""
def entropy_bin(data, width):
"""Entropy of a 1D signal by binning
    Bins of the given width are laid out over the full range of the data.
Parameters
----------
data : array_like
The data, a 1D sample of samples of a random variable.
width : float
The bin width of the histogram.
Returns
-------
h : float
The estimated entropy
.. [4] Moddemeijer, R. "On estimation of entropy and mutual information of continuous distributions",
Signal Processing 16 233 (1989)
"""
#if int(n_bins) != n_bins:
# raise ValueError('n_bins must be an int, not %s' % n_bins)
# upper = int(np.max(data))
# lower = int(np.min(data))
# bins = np.arange(lower, upper, step=width)
# print bins, len(bins)
# #bins = np.linspace(lower, upper, n_bins+1)
# bin_widths = bins[1:] - bins[0:-1]
# try:
# counts, bins = np.histogram(data, bins)
# except Exception as e:
# print e
# print data, bins, width, upper, lower, np.arange(lower, upper, step=width), bin_widths
# return None
upper = float(np.max(data))
lower = float(np.min(data))
bins = np.arange(lower, upper, step=width)
#bins = np.linspace(lower, upper, n_bins+1)
bin_widths = bins[1:] - bins[0:-1]
counts, bins = np.histogram(data, bins)
p = counts / np.sum(counts)
# ignore zero entries, and we can't forget to apply the analytic correction
# for the bin width!
bin_widths = np.compress(list(p != 0.), bin_widths)
p = np.compress(list(p != 0.), p)
entropy = -np.sum(p*(np.log2(p) - np.log2(bin_widths)))
return entropy
def entropy_ke(data):
"""Estimate the entropy of a continuous 1D signal using a kernel approach
Ahmad, I., and Lin, P. "A nonparametric estimation of the entropy for
absolutely continuous distributions (Corresp.)," IEEE Trans. Inf. Theory,
22 375 (1976)
"""
pass
def entropy_nn(data, presorted=False):
"""Estimate the entropy of a continuous 1D signal using the distribution
of nearest neighbor distances
.. math::
        H(x) = \frac{1}{n} \sum_{i=1}^n \ln(n \rho_i) + \ln 2 + \gamma
Where `H(x)` is the entropy of the signal x, `n` is the length of the signal, `rho_i`
is the distance from `x_i` to its nearest neighbor `x_j` in the dataset, and gamma
is the Euler-Mascheroni constant
Parameters
----------
data : array_like, ndims=1, dtype=float64
A 1D continuous signal
presorted : boolean, optional
Is the `data` array presorted? The rate limiting step of this calculation is sorting
the data array. So if you've already sorted it, you can make this go a little faster
by passing true.
Returns
-------
h : float
The estimated entropy
[1] Beirlant, J. Dudewicz, E. J. Gyoerfi, L. Van der Meulen, E. C.,
"Nonparametric entropy estimation: An overview", Int. J. Math Stat. Sci.
6 17 (1997) http://jimbeck.caltech.edu/summerlectures/references/Entropy%20estimation.pdf
"""
if data.ndim != 1:
raise ValueError('Only 1D supported')
data = np.array(data, dtype=np.float64)
if not presorted:
data = np.sort(data)
n = len(data)
nearest_distances = np.zeros(n, dtype=np.float64)
# populate the array nearest_distances s.t.
# nd_i = \min{j < n; j \neq i} (|| data_i - data_j ||)
# or in otherwords, nearest_distances[i] gives the distance
# from data[i] to the other data point which it is nearest to
    # we do this in nlogn time by sorting, but then do the iteration
    # over the sorted array in C, because Python's linear-time loop is way slower
    # than C's nlog(n) for moderate n.
scipy.weave.inline(r'''
int i;
double distance, left_distance, right_distance;
// populate the end points manually
nearest_distances[0] = data[1] - data[0];
nearest_distances[n-1] = data[n-1] - data[n-2];
// iterate over the interior points, checking if they're closer to their
// left or right neighbor.
    // seed the running right-hand gap; it becomes the left-hand gap on the first iteration
    right_distance = nearest_distances[0];
for (i = 1; i < n - 1; i++) {
left_distance = right_distance;
right_distance = data[i + 1] - data[i];
distance = left_distance < right_distance ? left_distance : right_distance;
nearest_distances[i] = distance;
}
''', ['data', 'n', 'nearest_distances'])
return np.mean(np.log(n*nearest_distances)) + np.log(2) + EULER_MASCHERONI
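# Editorial addition, not in the original module: a pure-numpy sketch of the
# nearest-neighbour distance step above, offered because scipy.weave is
# deprecated. It is not called by entropy_nn(); the helper name is invented.
def _entropy_nn_distances_numpy(data):
    """Return, for each point of the sorted 1D array `data` (len >= 2), the
    distance to its nearest neighbour."""
    diffs = np.diff(np.asarray(data, dtype=np.float64))  # consecutive gaps
    nearest = np.empty(diffs.shape[0] + 1, dtype=np.float64)
    nearest[0] = diffs[0]      # end points have a single neighbour
    nearest[-1] = diffs[-1]
    # interior points take the smaller of the gap to the left and to the right
    nearest[1:-1] = np.minimum(diffs[:-1], diffs[1:])
    return nearest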
def entropy_ci(data, radius, est_max_neighbors_within_radius=16):
"""Estimate the entropy of a continuous 1D signal using the generalized correlation integral
Parameters
----------
    data : array_like
        A 1D sample of the random variable.
    radius : float
        Neighborhood radius (kernel half-width) within which points count as
        neighbors.
    est_max_neighbors_within_radius : int, optional
        Estimate of the maximum number of datapoints within the specified
        radius of any trial point. This was only needed by an earlier kd-tree
        based approach (kd-tree queries need a count ``k`` in addition to a
        ``distance_upper_bound``, and allocate arrays of size (n x k), so the
        value should not be too big); the bisect-based code below does not
        use it.
Returns
-------
h : float
The estimated entropy
References
----------
[2] Prichard, D. and Theiler, J. "Generalized Redundancies for Time Series Analysis", Physica D. 84 476 (1995)
http://arxiv.org/pdf/comp-gas/9405006.pdf
[3] Pawelzik, K. and Schuster, H. G; "Generalized dimensions and entropies from a measured time series",
Phys. Rev. A; 35 481 (1987)
"""
n = len(data)
if data.ndim != 1:
raise ValueError('Only 1D supported')
data = np.sort(data)
n_neighbors = np.zeros(n, dtype=np.int)
for i in xrange(n):
high = bisect.bisect_left(data, data[i] + radius, lo=i)
low = bisect.bisect_right(data, data[i] - radius, lo=0, hi=i)
# number of data points excluding i that are within data[i] - radius and data[i] + radius
n_neighbors[i] = high - low - 1
# DEBUG
# assert n_neighbors[i] == np.count_nonzero((data < data[i] + radius) & (data > data[i] - radius)) - 1
# assert np.all(data[low:high] < data[i] + radius)
# assert np.all(data[low:high] > data[i] - radius)
fraction_neighbors = n_neighbors / n
# exclude the bins where n_neighbors is zero
# equation 20, 22 in [2]
# note, the paper seems to have left out the log(radius) term, but it's pretty
# obvious that it's supposed to be there. It's very analogous to the histogram
# estimator. You will also see an obvious dependence of the mean of the entropy
# estimate on the bin width if you don't use it
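    # spelled out (editorial gloss): the estimate is
    #     H  ~=  -(1/n') * sum_i ln( f_i / (2*radius) ),   f_i = n_neighbors[i] / n
    # where n' counts points with at least one neighbour -- a per-point density
    # estimate with a box kernel of half-width `radius`, analogous to the
    # histogram estimator above.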
entropy = -np.mean(np.compress(n_neighbors > 0, np.log(fraction_neighbors) - np.log(2*radius)))
return entropy
def main():
"""Generate some random samples and compare the entropy estimators. This will make some plots.
The idea is to run M trials where we generate N points and calculate their entropy. Then we plot,
for each estimator, the empirical distribution of the estimates over the M trials.
"""
n_trials = 150
n_pts = 10000
bin_entropies_01, nn_entropies, cgi_entropies_01 = [], [], []
bin_entropies_03, cgi_entropies_03 = [], []
bin_entropies_02, cgi_entropies_02 = [], []
#entropy_bin2(np.random.randn(1000), 30)
for i in range(n_trials):
print 'trial', i
#data = np.random.randn(n_pts)
data = np.random.exponential(size=n_pts)
nn_entropies.append(entropy_nn(data))
bin_entropies_01.append(entropy_bin(data, 0.05))
cgi_entropies_01.append(entropy_ci(data, 0.05))
bin_entropies_02.append(entropy_bin(data, 0.2))
cgi_entropies_02.append(entropy_ci(data, 0.2))
bin_entropies_03.append(entropy_bin(data, 0.3))
cgi_entropies_03.append(entropy_ci(data, 0.3))
pp.figure(figsize=(15,8))
    plot_gkde(nn_entropies, label='nn entropies')
plot_gkde(cgi_entropies_01, label='cgi entropies 0.1')
plot_gkde(cgi_entropies_02, label='cgi entropies 0.2')
plot_gkde(cgi_entropies_03, label='cgi entropies 0.3')
plot_gkde(bin_entropies_01, label='bin entropies 0.1')
plot_gkde(bin_entropies_02, label='bin entropies 0.2')
plot_gkde(bin_entropies_03, label='bin entropies 0.3')
#analytic = 0.5*np.log(2*np.pi*np.e)
analytic = 1
print analytic
pp.plot([analytic, analytic], [0, 20], 'k', linewidth=5)
pp.legend()
pp.show()
def plot_gkde(data, *args, **kwargs):
"""Plot a gaussia kernel density estimator. *args and **kwargs will be passed
directory to pyplot.plot()"""
kde = scipy.stats.gaussian_kde(data)
lower = np.mean(data) - 3*np.std(data)
upper = np.mean(data) + 3*np.std(data)
x = np.linspace(lower, upper, 100)
y = kde(x)
pp.plot(x, y, *args, **kwargs)
if __name__ == '__main__':
main()
|
rampasek/seizure-prediction
|
features/entropy.py
|
Python
|
gpl-2.0
| 10,855
|
[
"Gaussian"
] |
86e9a9f8405d4c86ba975e7ceeb20b149f43057af067150b30097cebbffabb66
|
# -*- coding: utf-8 -*-
"""
@author: nicholas
"""
import sys
import logging
import shutil
import os
import unittest
import time
from unittest.mock import Mock
from riboSeed.shared_methods import md5
from riboSeed.riboScore import getSnagCmd, getSelectCmd, getScanCmd, \
parseDirContents, make_nuc_nuc_recip_blast_cmds, merge_outfiles, \
BLAST_tab_to_df, filter_recip_BLAST_df, checkBlastForMisjoin, \
check_scan_select_snag_retruncodes
sys.dont_write_bytecode = True
logger = logging
@unittest.skipIf((sys.version_info[0] != 3) or (sys.version_info[1] < 5),
"Subprocess.call among other things wont run if tried " +
" with less than python 3.5")
class riboScoreTestCase(unittest.TestCase):
""" tests for riboSeed.py
"""
def setUp(self):
self.test_dir = os.path.join(os.path.dirname(__file__),
"output_riboScore_tests")
self.ref_dir = os.path.join(
os.path.dirname(__file__), "references", "")
self.score_ref_dir = os.path.join(
os.path.dirname(__file__),
"references",
"riboScore_references", "")
self.test_combine = os.path.join(
os.path.dirname(__file__),
"references",
"riboScore_references",
"test_combineA.tab")
self.test_forward = os.path.join(
os.path.dirname(__file__),
"references",
"riboScore_references",
"forward.tab")
self.test_reverse = os.path.join(
os.path.dirname(__file__),
"references",
"riboScore_references",
"reverse.tab")
self.startTime = time.time()
self.to_be_removed = []
def test_parseDirContents(self):
lst = parseDirContents(
dirname=self.ref_dir, ref_ext="gb", assembly_ext="fasta")
print(lst)
def test_getScanCmd(self):
res = getScanCmd(ref="test.fa", outroot="outdir", other_args="")
res2 = getScanCmd(ref="test.gb", outroot="outdir", other_args="")
ref_cmd = "ribo scan test.fa --min_length 5000 -o outdir{2}scan".format(
sys.executable,
os.path.join("..", "..",
os.path.dirname(os.path.dirname(__file__)),
"riboSeed",
"riboScan.py"),
os.path.sep)
self.assertEqual(
res[0],
ref_cmd)
self.assertEqual(
res[1], os.path.join("outdir", "scan", "scannedScaffolds.gb"))
self.assertEqual(res2[0], None)
def test_getSelectCmd(self):
res = getSelectCmd(gb="test.gb", outroot="outdir",
other_args="-s 16S:23S")
ref_cmd = "ribo select test.gb -o outdir{2}select -s 16S:23S".format(
sys.executable,
os.path.join(
"..", "..",
os.path.dirname(os.path.dirname(__file__)),
"riboSeed",
"riboSelect.py"),
os.path.sep)
self.assertEqual(res[0], ref_cmd)
self.assertEqual(
res[1],
os.path.join("outdir", "select", "riboSelect_grouped_loci.txt"))
def test_getSnagCmd(self):
res = getSnagCmd(scangb="test.gb", cluster="clusters.txt",
flank=20, outroot="outdir", other_args="")
ref_cmd = "ribo snag test.gb clusters.txt -l 20 --just_extract -o outdir{2}snag".format(
sys.executable,
os.path.join(
"..", "..",
os.path.dirname(os.path.dirname(__file__)),
"riboSeed",
"riboSnag.py"),
os.path.sep)
self.assertEqual(res[0], ref_cmd)
def test_make_nuc_nuc_recip_blast_cmds(self):
cmds, forward, recip = make_nuc_nuc_recip_blast_cmds(
query_list=["assembly1.fasta", "assembly2.fasta"],
output="outdir", subject_file="reference", logger=logger)
self.assertEqual(
cmds[0],
"blastn -out outdir/assembly1_vs_ref.tab -outfmt 6 " +
"-query assembly1.fasta -subject reference -num_threads 1 " +
"-num_alignments 50")
def test_merge_outfiles(self):
"""
"""
merged_tab = merge_outfiles(
filelist=[self.test_combine, self.test_combine],
outfile=os.path.join(self.score_ref_dir, "temp_combined.tab"))
self.assertEqual(
md5(os.path.join(self.score_ref_dir, "test_combined.tab")),
md5(merged_tab))
self.to_be_removed.append(merged_tab)
def test_single_merge_outfiles(self):
"""
"""
merged_tab = merge_outfiles(
filelist=[self.test_combine],
outfile=os.path.join(self.score_ref_dir, "temp_combined.tab"))
self.assertEqual(merged_tab, [self.test_combine])
def test_BLAST_tab_to_df(self):
colnames = [
"query_id", "subject_id", "identity_perc", "alignment_length",
"mismatches", "gap_opens", "q_start", "q_end", "s_start",
"s_end", "evalue", "bit_score"]
resultsdf = BLAST_tab_to_df(self.test_combine)
self.assertEqual(resultsdf.columns.values.tolist(), colnames)
def test_recip_blast(self):
""" reciprocal blast testing.
        It doesn't really test much efficiently
"""
df1 = BLAST_tab_to_df(self.test_forward)
df2 = BLAST_tab_to_df(self.test_reverse)
filtered_hits = filter_recip_BLAST_df(
df1=df1,
df2=df2,
min_lens={"concatenated_genome_4001..10887": 500},
min_percent=99.5,
logger=logger)
self.assertEqual(filtered_hits.shape, (2, 13))
def test_checkBlastForMisjoin(self):
df2 = BLAST_tab_to_df(self.test_reverse)
flanking_hits = checkBlastForMisjoin(
fasta="mock.fasta",
df=df2,
ref_lens={"concatenated_genome_4001..10887": 500},
flanking=1000,
BUF=50, logger=logger)
self.assertEqual(
flanking_hits[0],
["mock.fasta", "?",
"NODE_1_length_105529_cov_19.8862_0_94652..101540_RC_",
"concatenated_genome_4001..10887", "?"])
def test_check_scan_select_snag_fail1(self):
reslist = []
for i in [1, 0, 0]:
submock = Mock()
submock.returncode = i
reslist.append(submock)
with self.assertRaises(SystemExit):
check_scan_select_snag_retruncodes(
subreturns=reslist, logger=logger)
def test_check_scan_select_snag_fail2(self):
reslist = []
for i in [0, 1, 0]:
submock = Mock()
submock.returncode = i
reslist.append(submock)
with self.assertRaises(SystemExit):
check_scan_select_snag_retruncodes(
subreturns=reslist, logger=logger)
def test_check_scan_select_snag_nofail(self):
reslist = []
for i in [0, 0, 1]:
submock = Mock()
submock.returncode = i
reslist.append(submock)
check_scan_select_snag_retruncodes(
subreturns=reslist, logger=logger)
def tearDown(self):
""" delete temp files if no errors
"""
for filename in self.to_be_removed:
try:
os.unlink(filename)
except IsADirectoryError:
shutil.rmtree(filename)
t = time.time() - self.startTime
print("%s: %.3f" % (self.id(), t))
if __name__ == '__main__':
unittest.main()
|
nickp60/riboSeed
|
tests/test_riboScore.py
|
Python
|
mit
| 7,679
|
[
"BLAST"
] |
f680bc9f556fe4df9e8c0726b9153447d327b3ab06390806c3adf074bd814f62
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Matti Hämäläinen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Denis Engemann <denis.engemann@gmail.com>
# Andrew Dykstra <andrew.r.dykstra@gmail.com>
# Teon Brooks <teon.brooks@gmail.com>
# Daniel McCloy <dan.mccloy@gmail.com>
#
# License: BSD-3-Clause
import os
import os.path as op
import sys
from collections import OrderedDict
from copy import deepcopy
from functools import partial
import numpy as np
from ..defaults import HEAD_SIZE_DEFAULT, _handle_default
from ..transforms import _frame_to_str
from ..utils import (verbose, logger, warn,
_check_preload, _validate_type, fill_doc, _check_option,
_get_stim_channel, _check_fname, _check_dict_keys)
from ..io.compensator import get_current_comp
from ..io.constants import FIFF
from ..io.meas_info import (anonymize_info, Info, MontageMixin, create_info,
_rename_comps)
from ..io.pick import (channel_type, pick_info, pick_types, _picks_by_type,
_check_excludes_includes, _contains_ch_type,
channel_indices_by_type, pick_channels, _picks_to_idx,
_get_channel_types, get_channel_type_constants,
_pick_data_channels)
from ..io.tag import _rename_list
from ..io.write import DATE_NONE
from ..io.proj import setup_proj
from ..io._digitization import _get_data_as_dict_from_dig
def _get_meg_system(info):
"""Educated guess for the helmet type based on channels."""
have_helmet = True
for ch in info['chs']:
if ch['kind'] == FIFF.FIFFV_MEG_CH:
# Only take first 16 bits, as higher bits store CTF grad comp order
coil_type = ch['coil_type'] & 0xFFFF
nmag = np.sum(
[c['kind'] == FIFF.FIFFV_MEG_CH for c in info['chs']])
if coil_type == FIFF.FIFFV_COIL_NM_122:
system = '122m'
break
elif coil_type // 1000 == 3: # All Vectorview coils are 30xx
system = '306m'
break
elif (coil_type == FIFF.FIFFV_COIL_MAGNES_MAG or
coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD):
system = 'Magnes_3600wh' if nmag > 150 else 'Magnes_2500wh'
break
elif coil_type == FIFF.FIFFV_COIL_CTF_GRAD:
system = 'CTF_275'
break
elif coil_type == FIFF.FIFFV_COIL_KIT_GRAD:
system = 'KIT'
# Our helmet does not match very well, so let's just create it
have_helmet = False
break
elif coil_type == FIFF.FIFFV_COIL_BABY_GRAD:
system = 'BabySQUID'
break
elif coil_type == FIFF.FIFFV_COIL_ARTEMIS123_GRAD:
system = 'ARTEMIS123'
have_helmet = False
break
else:
system = 'unknown'
have_helmet = False
return system, have_helmet
def _get_ch_type(inst, ch_type, allow_ref_meg=False):
"""Choose a single channel type (usually for plotting).
Usually used in plotting to plot a single datatype, e.g. look for mags,
then grads, then ... to plot.
"""
if ch_type is None:
allowed_types = ['mag', 'grad', 'planar1', 'planar2', 'eeg', 'csd',
'fnirs_cw_amplitude', 'fnirs_fd_ac_amplitude',
'fnirs_fd_phase', 'fnirs_od', 'hbo', 'hbr',
'ecog', 'seeg', 'dbs']
allowed_types += ['ref_meg'] if allow_ref_meg else []
for type_ in allowed_types:
if isinstance(inst, Info):
if _contains_ch_type(inst, type_):
ch_type = type_
break
elif type_ in inst:
ch_type = type_
break
else:
raise RuntimeError('No plottable channel types found')
return ch_type
@verbose
def equalize_channels(instances, copy=True, verbose=None):
"""Equalize channel picks and ordering across multiple MNE-Python objects.
First, all channels that are not common to each object are dropped. Then,
using the first object in the list as a template, the channels of each
object are re-ordered to match the template. The end result is that all
given objects define the same channels, in the same order.
Parameters
----------
instances : list
A list of MNE-Python objects to equalize the channels for. Objects can
be of type Raw, Epochs, Evoked, AverageTFR, Forward, Covariance,
CrossSpectralDensity or Info.
copy : bool
When dropping and/or re-ordering channels, an object will be copied
        when this parameter is set to ``True`` (the default). When set to
        ``False``, the dropping and re-ordering of channels happens in-place.
.. versionadded:: 0.20.0
%(verbose)s
Returns
-------
equalized_instances : list
A list of MNE-Python objects that have the same channels defined in the
same order.
Notes
-----
    This function operates in-place when ``copy=False``.
"""
from ..cov import Covariance
from ..io.base import BaseRaw
from ..io.meas_info import Info
from ..epochs import BaseEpochs
from ..evoked import Evoked
from ..forward import Forward
from ..time_frequency import _BaseTFR, CrossSpectralDensity
# Instances need to have a `ch_names` attribute and a `pick_channels`
# method that supports `ordered=True`.
allowed_types = (BaseRaw, BaseEpochs, Evoked, _BaseTFR, Forward,
Covariance, CrossSpectralDensity, Info)
allowed_types_str = ("Raw, Epochs, Evoked, TFR, Forward, Covariance, "
"CrossSpectralDensity or Info")
for inst in instances:
_validate_type(inst, allowed_types, "Instances to be modified",
allowed_types_str)
chan_template = instances[0].ch_names
logger.info('Identifying common channels ...')
channels = [set(inst.ch_names) for inst in instances]
common_channels = set(chan_template).intersection(*channels)
all_channels = set(chan_template).union(*channels)
dropped = list(set(all_channels - common_channels))
# Preserve the order of chan_template
order = np.argsort([chan_template.index(ch) for ch in common_channels])
common_channels = np.array(list(common_channels))[order].tolist()
# Update all instances to match the common_channels list
reordered = False
equalized_instances = []
for inst in instances:
# Only perform picking when needed
if inst.ch_names != common_channels:
if copy:
inst = inst.copy()
inst.pick_channels(common_channels, ordered=True)
if len(inst.ch_names) == len(common_channels):
reordered = True
equalized_instances.append(inst)
if dropped:
logger.info('Dropped the following channels:\n%s' % dropped)
elif reordered:
logger.info('Channels have been re-ordered.')
return equalized_instances
class ContainsMixin(object):
"""Mixin class for Raw, Evoked, Epochs."""
def __contains__(self, ch_type):
"""Check channel type membership.
Parameters
----------
ch_type : str
Channel type to check for. Can be e.g. 'meg', 'eeg', 'stim', etc.
Returns
-------
in : bool
Whether or not the instance contains the given channel type.
Examples
--------
Channel type membership can be tested as::
>>> 'meg' in inst # doctest: +SKIP
True
>>> 'seeg' in inst # doctest: +SKIP
False
"""
if ch_type == 'meg':
has_ch_type = (_contains_ch_type(self.info, 'mag') or
_contains_ch_type(self.info, 'grad'))
else:
has_ch_type = _contains_ch_type(self.info, ch_type)
return has_ch_type
@property
def compensation_grade(self):
"""The current gradient compensation grade."""
return get_current_comp(self.info)
@fill_doc
def get_channel_types(self, picks=None, unique=False, only_data_chs=False):
"""Get a list of channel type for each channel.
Parameters
----------
%(picks_all)s
unique : bool
Whether to return only unique channel types. Default is ``False``.
only_data_chs : bool
Whether to ignore non-data channels. Default is ``False``.
Returns
-------
channel_types : list
The channel types.
"""
return _get_channel_types(self.info, picks=picks, unique=unique,
only_data_chs=only_data_chs)
@fill_doc
def get_montage(self):
"""Get a DigMontage from instance.
Returns
-------
%(montage)s
"""
from ..channels.montage import make_dig_montage
if self.info['dig'] is None:
return None
# obtain coord_frame, and landmark coords
# (nasion, lpa, rpa, hsp, hpi) from DigPoints
montage_bunch = _get_data_as_dict_from_dig(self.info['dig'])
coord_frame = _frame_to_str.get(montage_bunch.coord_frame)
# get the channel names and chs data structure
ch_names, chs = self.info['ch_names'], self.info['chs']
picks = pick_types(self.info, meg=False, eeg=True, seeg=True,
ecog=True, dbs=True, fnirs=True, exclude=[])
# channel positions from dig do not match ch_names one to one,
# so use loc[:3] instead
ch_pos = {ch_names[ii]: chs[ii]['loc'][:3] for ii in picks}
# create montage
montage = make_dig_montage(
ch_pos=ch_pos,
coord_frame=coord_frame,
nasion=montage_bunch.nasion,
lpa=montage_bunch.lpa,
rpa=montage_bunch.rpa,
hsp=montage_bunch.hsp,
hpi=montage_bunch.hpi,
)
return montage
channel_type_constants = get_channel_type_constants()
_human2fiff = {k: v.get('kind', FIFF.FIFFV_COIL_NONE) for k, v in
channel_type_constants.items()}
_human2unit = {k: v.get('unit', FIFF.FIFF_UNIT_NONE) for k, v in
channel_type_constants.items()}
_unit2human = {FIFF.FIFF_UNIT_V: 'V',
FIFF.FIFF_UNIT_T: 'T',
FIFF.FIFF_UNIT_T_M: 'T/m',
FIFF.FIFF_UNIT_MOL: 'M',
FIFF.FIFF_UNIT_NONE: 'NA',
FIFF.FIFF_UNIT_CEL: 'C'}
def _check_set(ch, projs, ch_type):
"""Ensure type change is compatible with projectors."""
new_kind = _human2fiff[ch_type]
if ch['kind'] != new_kind:
for proj in projs:
if ch['ch_name'] in proj['data']['col_names']:
raise RuntimeError('Cannot change channel type for channel %s '
'in projector "%s"'
% (ch['ch_name'], proj['desc']))
ch['kind'] = new_kind
class SetChannelsMixin(MontageMixin):
"""Mixin class for Raw, Evoked, Epochs."""
@verbose
def set_eeg_reference(self, ref_channels='average', projection=False,
ch_type='auto', forward=None, verbose=None):
"""Specify which reference to use for EEG data.
Use this function to explicitly specify the desired reference for EEG.
This can be either an existing electrode or a new virtual channel.
This function will re-reference the data according to the desired
reference.
Parameters
----------
%(set_eeg_reference_ref_channels)s
%(set_eeg_reference_projection)s
%(set_eeg_reference_ch_type)s
%(set_eeg_reference_forward)s
%(verbose_meth)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
Data with EEG channels re-referenced. If ``ref_channels='average'``
and ``projection=True`` a projection will be added instead of
directly re-referencing the data.
%(set_eeg_reference_see_also_notes)s
"""
from ..io.reference import set_eeg_reference
return set_eeg_reference(self, ref_channels=ref_channels, copy=False,
projection=projection, ch_type=ch_type,
forward=forward)[0]
def _get_channel_positions(self, picks=None):
"""Get channel locations from info.
Parameters
----------
picks : str | list | slice | None
None gets good data indices.
Notes
-----
.. versionadded:: 0.9.0
"""
picks = _picks_to_idx(self.info, picks)
chs = self.info['chs']
pos = np.array([chs[k]['loc'][:3] for k in picks])
n_zero = np.sum(np.sum(np.abs(pos), axis=1) == 0)
if n_zero > 1: # XXX some systems have origin (0, 0, 0)
raise ValueError('Could not extract channel positions for '
'{} channels'.format(n_zero))
return pos
def _set_channel_positions(self, pos, names):
"""Update channel locations in info.
Parameters
----------
pos : array-like | np.ndarray, shape (n_points, 3)
The channel positions to be set.
names : list of str
The names of the channels to be set.
Notes
-----
.. versionadded:: 0.9.0
"""
if len(pos) != len(names):
raise ValueError('Number of channel positions not equal to '
'the number of names given.')
pos = np.asarray(pos, dtype=np.float64)
if pos.shape[-1] != 3 or pos.ndim != 2:
msg = ('Channel positions must have the shape (n_points, 3) '
'not %s.' % (pos.shape,))
raise ValueError(msg)
for name, p in zip(names, pos):
if name in self.ch_names:
idx = self.ch_names.index(name)
self.info['chs'][idx]['loc'][:3] = p
else:
msg = ('%s was not found in the info. Cannot be updated.'
% name)
raise ValueError(msg)
@verbose
def set_channel_types(self, mapping, verbose=None):
"""Define the sensor type of channels.
Parameters
----------
mapping : dict
A dictionary mapping a channel to a sensor type (str), e.g.,
``{'EEG061': 'eog'}``.
%(verbose_meth)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
The instance (modified in place).
.. versionchanged:: 0.20
Return the instance.
Notes
-----
The following sensor types are accepted:
ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, dbs, stim, syst,
ecog, hbo, hbr, fnirs_cw_amplitude, fnirs_fd_ac_amplitude,
fnirs_fd_phase, fnirs_od
.. versionadded:: 0.9.0
"""
ch_names = self.info['ch_names']
# first check and assemble clean mappings of index and name
unit_changes = dict()
for ch_name, ch_type in mapping.items():
if ch_name not in ch_names:
raise ValueError("This channel name (%s) doesn't exist in "
"info." % ch_name)
c_ind = ch_names.index(ch_name)
if ch_type not in _human2fiff:
raise ValueError('This function cannot change to this '
'channel type: %s. Accepted channel types '
'are %s.'
% (ch_type,
", ".join(sorted(_human2unit.keys()))))
# Set sensor type
_check_set(self.info['chs'][c_ind], self.info['projs'], ch_type)
unit_old = self.info['chs'][c_ind]['unit']
unit_new = _human2unit[ch_type]
if unit_old not in _unit2human:
raise ValueError("Channel '%s' has unknown unit (%s). Please "
"fix the measurement info of your data."
% (ch_name, unit_old))
if unit_old != _human2unit[ch_type]:
this_change = (_unit2human[unit_old], _unit2human[unit_new])
if this_change not in unit_changes:
unit_changes[this_change] = list()
unit_changes[this_change].append(ch_name)
self.info['chs'][c_ind]['unit'] = _human2unit[ch_type]
if ch_type in ['eeg', 'seeg', 'ecog', 'dbs']:
coil_type = FIFF.FIFFV_COIL_EEG
elif ch_type == 'hbo':
coil_type = FIFF.FIFFV_COIL_FNIRS_HBO
elif ch_type == 'hbr':
coil_type = FIFF.FIFFV_COIL_FNIRS_HBR
elif ch_type == 'fnirs_cw_amplitude':
coil_type = FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE
elif ch_type == 'fnirs_fd_ac_amplitude':
coil_type = FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE
elif ch_type == 'fnirs_fd_phase':
coil_type = FIFF.FIFFV_COIL_FNIRS_FD_PHASE
elif ch_type == 'fnirs_od':
coil_type = FIFF.FIFFV_COIL_FNIRS_OD
else:
coil_type = FIFF.FIFFV_COIL_NONE
self.info['chs'][c_ind]['coil_type'] = coil_type
msg = "The unit for channel(s) {0} has changed from {1} to {2}."
for this_change, names in unit_changes.items():
warn(msg.format(", ".join(sorted(names)), *this_change))
return self
@verbose
def rename_channels(self, mapping, allow_duplicates=False, verbose=None):
"""Rename channels.
Parameters
----------
%(rename_channels_mapping_duplicates)s
%(verbose_meth)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
The instance (modified in place).
.. versionchanged:: 0.20
Return the instance.
Notes
-----
.. versionadded:: 0.9.0
"""
from ..io import BaseRaw
ch_names_orig = list(self.info['ch_names'])
rename_channels(self.info, mapping, allow_duplicates)
# Update self._orig_units for Raw
if isinstance(self, BaseRaw):
# whatever mapping was provided, now we can just use a dict
mapping = dict(zip(ch_names_orig, self.info['ch_names']))
if self._orig_units is not None:
for old_name, new_name in mapping.items():
if old_name != new_name:
self._orig_units[new_name] = self._orig_units[old_name]
del self._orig_units[old_name]
ch_names = self.annotations.ch_names
for ci, ch in enumerate(ch_names):
ch_names[ci] = tuple(mapping.get(name, name) for name in ch)
return self
@verbose
def plot_sensors(self, kind='topomap', ch_type=None, title=None,
show_names=False, ch_groups=None, to_sphere=True,
axes=None, block=False, show=True, sphere=None,
verbose=None):
"""Plot sensor positions.
Parameters
----------
kind : str
Whether to plot the sensors as 3d, topomap or as an interactive
sensor selection dialog. Available options 'topomap', '3d',
'select'. If 'select', a set of channels can be selected
interactively by using lasso selector or clicking while holding
control key. The selected channels are returned along with the
figure instance. Defaults to 'topomap'.
ch_type : None | str
The channel type to plot. Available options 'mag', 'grad', 'eeg',
'seeg', 'dbs', 'ecog', 'all'. If ``'all'``, all the available mag,
grad, eeg, seeg, dbs, and ecog channels are plotted. If
None (default), then channels are chosen in the order given above.
title : str | None
Title for the figure. If None (default), equals to ``'Sensor
positions (%%s)' %% ch_type``.
show_names : bool | array of str
Whether to display all channel names. If an array, only the channel
names in the array are shown. Defaults to False.
ch_groups : 'position' | array of shape (n_ch_groups, n_picks) | None
Channel groups for coloring the sensors. If None (default), default
coloring scheme is used. If 'position', the sensors are divided
into 8 regions. See ``order`` kwarg of :func:`mne.viz.plot_raw`. If
array, the channels are divided by picks given in the array.
.. versionadded:: 0.13.0
to_sphere : bool
Whether to project the 3d locations to a sphere. When False, the
sensor array appears as if viewed from directly above the subject's
head, looking straight down. Has no effect when kind='3d'. Defaults to True.
.. versionadded:: 0.14.0
axes : instance of Axes | instance of Axes3D | None
Axes to draw the sensors to. If ``kind='3d'``, axes must be an
instance of Axes3D. If None (default), a new axes will be created.
.. versionadded:: 0.13.0
block : bool
Whether to halt program execution until the figure is closed.
Defaults to False.
.. versionadded:: 0.13.0
show : bool
Show figure if True. Defaults to True.
%(topomap_sphere_auto)s
%(verbose_meth)s
Returns
-------
fig : instance of Figure
Figure containing the sensor topography.
selection : list
A list of selected channels. Only returned if ``kind=='select'``.
See Also
--------
mne.viz.plot_layout
Notes
-----
This function plots the sensor locations from the info structure using
matplotlib. For drawing the sensors using mayavi see
:func:`mne.viz.plot_alignment`.
.. versionadded:: 0.12.0
"""
from ..viz.utils import plot_sensors
return plot_sensors(self.info, kind=kind, ch_type=ch_type, title=title,
show_names=show_names, ch_groups=ch_groups,
to_sphere=to_sphere, axes=axes, block=block,
show=show, sphere=sphere, verbose=verbose)
@verbose
def anonymize(self, daysback=None, keep_his=False, verbose=None):
"""Anonymize measurement information in place.
Parameters
----------
%(anonymize_info_parameters)s
%(verbose)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
The modified instance.
Notes
-----
%(anonymize_info_notes)s
.. versionadded:: 0.13.0
"""
anonymize_info(self.info, daysback=daysback, keep_his=keep_his,
verbose=verbose)
self.set_meas_date(self.info['meas_date']) # unify annot update
return self
def set_meas_date(self, meas_date):
"""Set the measurement start date.
Parameters
----------
meas_date : datetime | float | tuple | None
The new measurement date.
If datetime object, it must be timezone-aware and in UTC.
A tuple of (seconds, microseconds) or float (alias for
``(meas_date, 0)``) can also be passed and a datetime
object will be automatically created. If None, will remove
the time reference.
Returns
-------
inst : instance of Raw | Epochs | Evoked
The modified raw instance. Operates in place.
See Also
--------
mne.io.Raw.anonymize
Notes
-----
If you want to remove all time references in the file, call
:func:`mne.io.anonymize_info(inst.info) <mne.io.anonymize_info>`
after calling ``inst.set_meas_date(None)``.
.. versionadded:: 0.20
"""
from ..annotations import _handle_meas_date
meas_date = _handle_meas_date(meas_date)
with self.info._unlock():
self.info['meas_date'] = meas_date
# clear file_id and meas_id if needed
if meas_date is None:
for key in ('file_id', 'meas_id'):
value = self.info.get(key)
if value is not None:
assert 'msecs' not in value
value['secs'] = DATE_NONE[0]
value['usecs'] = DATE_NONE[1]
# The following copy is needed for a test CTF dataset
# otherwise value['machid'][:] = 0 would suffice
_tmp = value['machid'].copy()
_tmp[:] = 0
value['machid'] = _tmp
if hasattr(self, 'annotations'):
self.annotations._orig_time = meas_date
return self
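# --- Usage sketch (illustrative addition, not upstream code) ---
# Demonstrates the SetChannelsMixin methods above on a synthetic Raw; the
# channel names and the mappings used here are arbitrary examples.
def _demo_set_channels_mixin():
    """Sketch: retype, rename and de-identify channels of a synthetic Raw."""
    import numpy as np
    from mne import create_info
    from mne.io import RawArray
    info = create_info(['EEG 061', 'EEG 062'], sfreq=250., ch_types='eeg')
    raw = RawArray(np.random.RandomState(0).randn(2, 250), info)
    raw.set_channel_types({'EEG 061': 'eog'})    # re-tag one channel as EOG
    raw.rename_channels({'EEG 061': 'EOG 061'})  # and give it a clearer name
    raw.set_meas_date(None)                      # drop the time reference ...
    raw.anonymize()                              # ... and scrub subject info
    print(raw.get_channel_types())               # -> ['eog', 'eeg']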
class UpdateChannelsMixin(object):
"""Mixin class for Raw, Evoked, Epochs, AverageTFR."""
@verbose
def pick_types(self, meg=False, eeg=False, stim=False, eog=False,
ecg=False, emg=False, ref_meg='auto', misc=False,
resp=False, chpi=False, exci=False, ias=False, syst=False,
seeg=False, dipole=False, gof=False, bio=False,
ecog=False, fnirs=False, csd=False, dbs=False, include=(),
exclude='bads', selection=None, verbose=None):
"""Pick some channels by type and names.
Parameters
----------
meg : bool | str
If True include MEG channels. If string it can be 'mag', 'grad',
'planar1' or 'planar2' to select only magnetometers, all
gradiometers, or a specific type of gradiometer.
eeg : bool
If True include EEG channels.
stim : bool
If True include stimulus channels.
eog : bool
If True include EOG channels.
ecg : bool
If True include ECG channels.
emg : bool
If True include EMG channels.
ref_meg : bool | str
If True include CTF / 4D reference channels. If 'auto', reference
channels are included if compensations are present and ``meg`` is
not False. Can also be the string options for the ``meg``
parameter.
misc : bool
If True include miscellaneous analog channels.
resp : bool
If ``True`` include respiratory channels.
chpi : bool
If True include continuous HPI coil channels.
exci : bool
Flux excitation channel used to be a stimulus channel.
ias : bool
Internal Active Shielding data (maybe on Triux only).
syst : bool
System status channel information (on Triux systems only).
seeg : bool
Stereotactic EEG channels.
dipole : bool
Dipole time course channels.
gof : bool
Dipole goodness of fit channels.
bio : bool
Bio channels.
ecog : bool
Electrocorticography channels.
fnirs : bool | str
Functional near-infrared spectroscopy channels. If True include all
fNIRS channels. If False (default) include none. If string it can
be 'hbo' (to include channels measuring oxyhemoglobin) or 'hbr' (to
include channels measuring deoxyhemoglobin).
csd : bool
EEG-CSD channels.
dbs : bool
Deep brain stimulation channels.
include : list of str
List of additional channels to include. If empty do not include
any.
exclude : list of str | str
List of channels to exclude. If 'bads' (default), exclude channels
in ``info['bads']``.
selection : list of str
Restrict sensor channels (MEG, EEG) to this list of channel names.
%(verbose_meth)s
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
pick_channels
Notes
-----
.. versionadded:: 0.9.0
"""
idx = pick_types(
self.info, meg=meg, eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg,
ref_meg=ref_meg, misc=misc, resp=resp, chpi=chpi, exci=exci,
ias=ias, syst=syst, seeg=seeg, dipole=dipole, gof=gof, bio=bio,
ecog=ecog, fnirs=fnirs, dbs=dbs, include=include, exclude=exclude,
selection=selection)
self._pick_drop_channels(idx)
# remove dropped channel types from reject and flat
if getattr(self, 'reject', None) is not None:
# use list(self.reject) to avoid RuntimeError for changing
# dictionary size during iteration
for ch_type in list(self.reject):
if ch_type not in self:
del self.reject[ch_type]
if getattr(self, 'flat', None) is not None:
for ch_type in list(self.flat):
if ch_type not in self:
del self.flat[ch_type]
return self
def pick_channels(self, ch_names, ordered=False):
"""Pick some channels.
Parameters
----------
ch_names : list
The list of channels to select.
ordered : bool
If True (default False), ensure that the order of the channels in
the modified instance matches the order of ``ch_names``.
.. versionadded:: 0.20.0
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
drop_channels
pick_types
reorder_channels
Notes
-----
The channel names given are assumed to be a set, i.e. the order
does not matter. The original order of the channels is preserved.
You can use ``reorder_channels`` to set channel order if necessary.
.. versionadded:: 0.9.0
"""
picks = pick_channels(self.info['ch_names'], ch_names, ordered=ordered)
return self._pick_drop_channels(picks)
@verbose
def pick(self, picks, exclude=(), *, verbose=None):
"""Pick a subset of channels.
Parameters
----------
%(picks_all)s
exclude : list | str
Set of channels to exclude, only used when picking based on
types (e.g., exclude="bads" when picks="meg").
%(verbose)s
.. versionadded:: 0.24.0
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
"""
picks = _picks_to_idx(self.info, picks, 'all', exclude,
allow_empty=False)
return self._pick_drop_channels(picks)
def reorder_channels(self, ch_names):
"""Reorder channels.
Parameters
----------
ch_names : list
The desired channel order.
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
drop_channels
pick_types
pick_channels
Notes
-----
Channel names must be unique. Channels that are not in ``ch_names``
are dropped.
.. versionadded:: 0.16.0
"""
_check_excludes_includes(ch_names)
idx = list()
for ch_name in ch_names:
ii = self.ch_names.index(ch_name)
if ii in idx:
raise ValueError('Channel name repeated: %s' % (ch_name,))
idx.append(ii)
return self._pick_drop_channels(idx)
def drop_channels(self, ch_names):
"""Drop channel(s).
Parameters
----------
ch_names : iterable or str
Iterable (e.g. list) of channel name(s) or channel name to remove.
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
reorder_channels
pick_channels
pick_types
Notes
-----
.. versionadded:: 0.9.0
"""
if isinstance(ch_names, str):
ch_names = [ch_names]
try:
all_str = all([isinstance(ch, str) for ch in ch_names])
except TypeError:
raise ValueError("'ch_names' must be iterable, got "
"type {} ({}).".format(type(ch_names), ch_names))
if not all_str:
raise ValueError("Each element in 'ch_names' must be str, got "
"{}.".format([type(ch) for ch in ch_names]))
missing = [ch for ch in ch_names if ch not in self.ch_names]
if len(missing) > 0:
msg = "Channel(s) {0} not found, nothing dropped."
raise ValueError(msg.format(", ".join(missing)))
bad_idx = [self.ch_names.index(ch) for ch in ch_names
if ch in self.ch_names]
idx = np.setdiff1d(np.arange(len(self.ch_names)), bad_idx)
return self._pick_drop_channels(idx)
@verbose
def _pick_drop_channels(self, idx, *, verbose=None):
# avoid circular imports
from ..io import BaseRaw
from ..time_frequency import AverageTFR, EpochsTFR
msg = 'adding, dropping, or reordering channels'
if isinstance(self, BaseRaw):
if self._projector is not None:
_check_preload(self, f'{msg} after calling .apply_proj()')
else:
_check_preload(self, msg)
if getattr(self, 'picks', None) is not None:
self.picks = self.picks[idx]
if getattr(self, '_read_picks', None) is not None:
self._read_picks = [r[idx] for r in self._read_picks]
if hasattr(self, '_cals'):
self._cals = self._cals[idx]
pick_info(self.info, idx, copy=False)
for key in ('_comp', '_projector'):
mat = getattr(self, key, None)
if mat is not None:
setattr(self, key, mat[idx][:, idx])
# All others (Evoked, Epochs, Raw) have chs axis=-2
axis = -3 if isinstance(self, (AverageTFR, EpochsTFR)) else -2
if hasattr(self, '_data'): # skip non-preloaded Raw
self._data = self._data.take(idx, axis=axis)
else:
assert isinstance(self, BaseRaw) and not self.preload
if isinstance(self, BaseRaw):
self.annotations._prune_ch_names(self.info, on_missing='ignore')
self._pick_projs()
return self
def _pick_projs(self):
"""Keep only projectors which apply to at least 1 data channel."""
drop_idx = []
for idx, proj in enumerate(self.info['projs']):
if not set(self.info['ch_names']) & set(proj['data']['col_names']):
drop_idx.append(idx)
for idx in drop_idx:
logger.info(f"Removing projector {self.info['projs'][idx]}")
if drop_idx and hasattr(self, 'del_proj'):
self.del_proj(drop_idx)
return self
def add_channels(self, add_list, force_update_info=False):
"""Append new channels to the instance.
Parameters
----------
add_list : list
A list of objects to append to self. Must contain all the same
type as the current object.
force_update_info : bool
If True, force the info for objects to be appended to match the
values in ``self``. This should generally only be used when adding
stim channels for which important metadata won't be overwritten.
.. versionadded:: 0.12
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
drop_channels
Notes
-----
If ``self`` is a Raw instance that has been preloaded into a
:obj:`numpy.memmap` instance, the memmap will be resized.
"""
# avoid circular imports
from ..io import BaseRaw, _merge_info
from ..epochs import BaseEpochs
_validate_type(add_list, (list, tuple), 'Input')
# Object-specific checks
for inst in add_list + [self]:
_check_preload(inst, "adding channels")
if isinstance(self, BaseRaw):
con_axis = 0
comp_class = BaseRaw
elif isinstance(self, BaseEpochs):
con_axis = 1
comp_class = BaseEpochs
else:
con_axis = 0
comp_class = type(self)
for inst in add_list:
_validate_type(inst, comp_class, 'All input')
data = [inst._data for inst in [self] + add_list]
# Make sure that all dimensions other than channel axis are the same
compare_axes = [i for i in range(data[0].ndim) if i != con_axis]
shapes = np.array([dat.shape for dat in data])[:, compare_axes]
for shape in shapes:
if not ((shapes[0] - shape) == 0).all():
raise AssertionError('All data dimensions except channels '
'must match, got %s != %s'
% (shapes[0], shape))
del shapes
# Create final data / info objects
infos = [self.info] + [inst.info for inst in add_list]
new_info = _merge_info(infos, force_update_to_first=force_update_info)
# Now update the attributes
if isinstance(self._data, np.memmap) and con_axis == 0 and \
sys.platform != 'darwin': # resizing not available--no mremap
# Use a resize and fill in other ones
out_shape = (sum(d.shape[0] for d in data),) + data[0].shape[1:]
n_bytes = np.prod(out_shape) * self._data.dtype.itemsize
self._data.flush()
self._data.base.resize(n_bytes)
self._data = np.memmap(self._data.filename, mode='r+',
dtype=self._data.dtype, shape=out_shape)
assert self._data.shape == out_shape
assert self._data.nbytes == n_bytes
offset = len(data[0])
for d in data[1:]:
this_len = len(d)
self._data[offset:offset + this_len] = d
offset += this_len
else:
self._data = np.concatenate(data, axis=con_axis)
self.info = new_info
if isinstance(self, BaseRaw):
self._cals = np.concatenate([getattr(inst, '_cals')
for inst in [self] + add_list])
# We should never use these since data are preloaded, let's just
# set it to something large and likely to break (2 ** 31 - 1)
extra_idx = [2147483647] * sum(info['nchan'] for info in infos[1:])
assert all(len(r) == infos[0]['nchan'] for r in self._read_picks)
self._read_picks = [
np.concatenate([r, extra_idx]) for r in self._read_picks]
assert all(len(r) == self.info['nchan'] for r in self._read_picks)
elif isinstance(self, BaseEpochs):
self.picks = np.arange(self._data.shape[1])
if hasattr(self, '_projector'):
activate = False if self._do_delayed_proj else self.proj
self._projector, self.info = setup_proj(self.info, False,
activate=activate)
return self
@fill_doc
def add_reference_channels(self, ref_channels):
"""Add reference channels to data that consists of all zeros.
Adds reference channels to data that were not included during
recording. This is useful when you need to re-reference your data
to different channels. These added channels will consist of all zeros.
Parameters
----------
%(ref_channels)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
The modified instance.
"""
from ..io.reference import add_reference_channels
return add_reference_channels(self, ref_channels, copy=False)
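# --- Usage sketch (illustrative addition, not upstream code) ---
# Shows the channel-selection methods of UpdateChannelsMixin above; the data
# are synthetic and all method names/signatures come from the class itself.
def _demo_update_channels_mixin():
    """Sketch: pick, reorder and drop channels of a synthetic Raw."""
    import numpy as np
    from mne import create_info
    from mne.io import RawArray
    info = create_info(['EEG 001', 'EEG 002', 'EOG 001', 'STI 014'],
                       sfreq=100., ch_types=['eeg', 'eeg', 'eog', 'stim'])
    raw = RawArray(np.zeros((4, 200)), info)
    eeg_only = raw.copy().pick_types(eeg=True)       # keep EEG channels only
    reordered = raw.copy().reorder_channels(['EEG 002', 'EEG 001'])
    slim = raw.copy().drop_channels(['STI 014'])     # remove the trigger line
    print(eeg_only.ch_names, reordered.ch_names, slim.ch_names)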
class InterpolationMixin(object):
"""Mixin class for Raw, Evoked, Epochs."""
@verbose
def interpolate_bads(self, reset_bads=True, mode='accurate',
origin='auto', method=None, exclude=(),
verbose=None):
"""Interpolate bad MEG and EEG channels.
Operates in place.
Parameters
----------
reset_bads : bool
If True, remove the bads from info.
mode : str
Either ``'accurate'`` or ``'fast'``, determines the quality of the
Legendre polynomial expansion used for interpolation of channels
using the minimum-norm method.
origin : array-like, shape (3,) | str
Origin of the sphere in the head coordinate frame and in meters.
Can be ``'auto'`` (default), which means a head-digitization-based
origin fit.
.. versionadded:: 0.17
method : dict
Method to use for each channel type.
Currently only the key "eeg" has multiple options:
- ``"spline"`` (default)
Use spherical spline interpolation.
- ``"MNE"``
Use minimum-norm projection to a sphere and back.
This is the method used for MEG channels.
The value for "meg" is "MNE", and the value for
"fnirs" is "nearest". The default (None) is thus an alias for::
method=dict(meg="MNE", eeg="spline", fnirs="nearest")
.. versionadded:: 0.21
exclude : list | tuple
The channels to exclude from interpolation. If excluded a bad
channel will stay in bads.
%(verbose_meth)s
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
Notes
-----
.. versionadded:: 0.9.0
"""
from ..bem import _check_origin
from .interpolation import _interpolate_bads_eeg,\
_interpolate_bads_meeg, _interpolate_bads_nirs
_check_preload(self, "interpolation")
method = _handle_default('interpolation_method', method)
for key in method:
_check_option('method[key]', key, ('meg', 'eeg', 'fnirs'))
_check_option("method['eeg']", method['eeg'], ('spline', 'MNE'))
_check_option("method['meg']", method['meg'], ('MNE',))
_check_option("method['fnirs']", method['fnirs'], ('nearest',))
if len(self.info['bads']) == 0:
warn('No bad channels to interpolate. Doing nothing...')
return self
logger.info('Interpolating bad channels')
origin = _check_origin(origin, self.info)
if method['eeg'] == 'spline':
_interpolate_bads_eeg(self, origin=origin, exclude=exclude)
eeg_mne = False
else:
eeg_mne = True
_interpolate_bads_meeg(self, mode=mode, origin=origin, eeg=eeg_mne,
exclude=exclude)
_interpolate_bads_nirs(self, exclude=exclude)
if reset_bads is True:
self.info['bads'] = \
[ch for ch in self.info['bads'] if ch in exclude]
return self
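# --- Usage sketch (illustrative addition, not upstream code) ---
# Interpolates a bad EEG channel on a synthetic Raw; a standard montage is set
# first because the spherical-spline method needs sensor positions.
def _demo_interpolate_bads():
    """Sketch: spline-interpolate one bad EEG channel of a synthetic Raw."""
    import numpy as np
    from mne import create_info
    from mne.io import RawArray
    from mne.channels import make_standard_montage
    ch_names = ['Fz', 'Cz', 'Pz', 'Oz', 'C3', 'C4', 'F3', 'F4']
    info = create_info(ch_names, sfreq=100., ch_types='eeg')
    raw = RawArray(np.random.RandomState(0).randn(len(ch_names), 500), info)
    raw.set_montage(make_standard_montage('standard_1020'))  # positions needed
    raw.info['bads'] = ['Cz']
    raw.interpolate_bads(reset_bads=True)  # default EEG method: spherical spline
    print(raw.info['bads'])                # -> [] after interpolation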
@verbose
def rename_channels(info, mapping, allow_duplicates=False, verbose=None):
"""Rename channels.
Parameters
----------
%(info_not_none)s Note: modified in place.
%(rename_channels_mapping_duplicates)s
%(verbose)s
"""
_validate_type(info, Info, 'info')
info._check_consistency()
bads = list(info['bads']) # make our own local copies
ch_names = list(info['ch_names'])
# first check and assemble clean mappings of index and name
if isinstance(mapping, dict):
_check_dict_keys(mapping, ch_names, key_description="channel name(s)",
valid_key_source="info")
new_names = [(ch_names.index(ch_name), new_name)
for ch_name, new_name in mapping.items()]
elif callable(mapping):
new_names = [(ci, mapping(ch_name))
for ci, ch_name in enumerate(ch_names)]
else:
raise ValueError('mapping must be callable or dict, not %s'
% (type(mapping),))
# check we got all strings out of the mapping
for new_name in new_names:
_validate_type(new_name[1], 'str', 'New channel mappings')
# do the remapping locally
for c_ind, new_name in new_names:
for bi, bad in enumerate(bads):
if bad == ch_names[c_ind]:
bads[bi] = new_name
ch_names[c_ind] = new_name
# check that all the channel names are unique
if len(ch_names) != len(np.unique(ch_names)) and not allow_duplicates:
raise ValueError('New channel names are not unique, renaming failed')
# do the remapping in info
info['bads'] = bads
ch_names_mapping = dict()
for ch, ch_name in zip(info['chs'], ch_names):
ch_names_mapping[ch['ch_name']] = ch_name
ch['ch_name'] = ch_name
# .get b/c fwd info omits it
_rename_comps(info.get('comps', []), ch_names_mapping)
if 'projs' in info: # fwd might omit it
for proj in info['projs']:
proj['data']['col_names'][:] = \
_rename_list(proj['data']['col_names'], ch_names_mapping)
info._update_redundant()
info._check_consistency()
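# --- Usage sketch (illustrative addition, not upstream code) ---
# The module-level rename_channels above also accepts a callable mapping and
# modifies the Info in place; the channel names here are arbitrary examples.
def _demo_rename_channels_function():
    """Sketch: rename channels of an Info via a callable mapping."""
    from mne import create_info
    info = create_info(['MEG 0113', 'MEG 0112'], sfreq=1000., ch_types='grad')
    rename_channels(info, lambda name: name.replace(' ', ''))
    print(info['ch_names'])   # -> ['MEG0113', 'MEG0112']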
def _recursive_flatten(cell, dtype):
"""Unpack mat files in Python."""
if len(cell) > 0:
while not isinstance(cell[0], dtype):
cell = [c for d in cell for c in d]
return cell
@fill_doc
def read_ch_adjacency(fname, picks=None):
"""Parse FieldTrip neighbors .mat file.
More information on these neighbor definitions can be found on the related
`FieldTrip documentation pages
<http://www.fieldtriptoolbox.org/template/neighbours/>`__.
Parameters
----------
fname : str
The file name. Example: 'neuromag306mag', 'neuromag306planar',
'ctf275', 'biosemi64', etc.
%(picks_all)s
Picks must match the template.
Returns
-------
ch_adjacency : scipy.sparse.csr_matrix, shape (n_channels, n_channels)
The adjacency matrix.
ch_names : list
The list of channel names present in adjacency matrix.
See Also
--------
find_ch_adjacency
Notes
-----
This function is closely related to :func:`find_ch_adjacency`. If you
don't know the correct file for the neighbor definitions,
:func:`find_ch_adjacency` can compute the adjacency matrix from 2d
sensor locations.
"""
from scipy.io import loadmat
if not op.isabs(fname):
templates_dir = op.realpath(op.join(op.dirname(__file__),
'data', 'neighbors'))
templates = os.listdir(templates_dir)
for f in templates:
if f == fname:
break
if f == fname + '_neighb.mat':
fname += '_neighb.mat'
break
else:
raise ValueError('I do not know about this neighbor '
'template: "{}"'.format(fname))
fname = op.join(templates_dir, fname)
nb = loadmat(fname)['neighbours']
ch_names = _recursive_flatten(nb['label'], str)
picks = _picks_to_idx(len(ch_names), picks)
neighbors = [_recursive_flatten(c, str) for c in
nb['neighblabel'].flatten()]
assert len(ch_names) == len(neighbors)
adjacency = _ch_neighbor_adjacency(ch_names, neighbors)
# picking before constructing matrix is buggy
adjacency = adjacency[picks][:, picks]
ch_names = [ch_names[p] for p in picks]
return adjacency, ch_names
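# --- Usage sketch (illustrative addition, not upstream code) ---
# Loads one of the bundled FieldTrip neighbor templates by name, as listed in
# the docstring above; 'biosemi64' is just one of the available templates.
def _demo_read_ch_adjacency():
    """Sketch: read a bundled neighbor template and inspect its size."""
    adjacency, ch_names = read_ch_adjacency('biosemi64')
    print(adjacency.shape, len(ch_names))   # square (n_channels, n_channels)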
def _ch_neighbor_adjacency(ch_names, neighbors):
"""Compute sensor adjacency matrix.
Parameters
----------
ch_names : list of str
The channel names.
neighbors : list of list
A list of list of channel names. The neighbors to
which the channels in ch_names are connected with.
Must be of the same length as ch_names.
Returns
-------
ch_adjacency : scipy.sparse matrix
The adjacency matrix.
"""
from scipy import sparse
if len(ch_names) != len(neighbors):
raise ValueError('`ch_names` and `neighbors` must '
'have the same length')
set_neighbors = {c for d in neighbors for c in d}
rest = set_neighbors - set(ch_names)
if len(rest) > 0:
raise ValueError('Some of your neighbors are not present in the '
'list of channel names')
for neigh in neighbors:
if (not isinstance(neigh, list) and
not all(isinstance(c, str) for c in neigh)):
raise ValueError('`neighbors` must be a list of lists of str')
ch_adjacency = np.eye(len(ch_names), dtype=bool)
for ii, neigbs in enumerate(neighbors):
ch_adjacency[ii, [ch_names.index(i) for i in neigbs]] = True
ch_adjacency = sparse.csr_matrix(ch_adjacency)
return ch_adjacency
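# --- Usage sketch (illustrative addition, not upstream code) ---
# _ch_neighbor_adjacency is private; this only illustrates its contract with a
# three-channel chain A - B - C and arbitrary names.
def _demo_ch_neighbor_adjacency():
    """Sketch: build an adjacency matrix from an explicit neighbor listing."""
    names = ['A', 'B', 'C']
    neighbors = [['B'], ['A', 'C'], ['B']]   # A and C are each B's neighbor
    adj = _ch_neighbor_adjacency(names, neighbors)
    print(adj.toarray().astype(int))
    # [[1 1 0]
    #  [1 1 1]
    #  [0 1 1]]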
@fill_doc
def find_ch_adjacency(info, ch_type):
"""Find the adjacency matrix for the given channels.
This function tries to infer the appropriate adjacency matrix template
for the given channels. If a template is not found, the adjacency matrix
is computed using Delaunay triangulation based on 2d sensor locations.
Parameters
----------
%(info_not_none)s
ch_type : str | None
The channel type for computing the adjacency matrix. Currently
supports 'mag', 'grad', 'eeg' and None. If None, the info must contain
only one channel type.
Returns
-------
ch_adjacency : scipy.sparse.csr_matrix, shape (n_channels, n_channels)
The adjacency matrix.
ch_names : list
The list of channel names present in adjacency matrix.
See Also
--------
read_ch_adjacency
Notes
-----
.. versionadded:: 0.15
Automatic detection of an appropriate adjacency matrix template only
works for MEG data at the moment. This means that the adjacency matrix
is always computed for EEG data and never loaded from a template file. If
you want to load a template for a given montage use
:func:`read_ch_adjacency` directly.
"""
if ch_type is None:
picks = channel_indices_by_type(info)
if sum([len(p) != 0 for p in picks.values()]) != 1:
raise ValueError('info must contain only one channel type if '
'ch_type is None.')
ch_type = channel_type(info, 0)
else:
_check_option('ch_type', ch_type, ['mag', 'grad', 'eeg'])
(has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types,
has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils,
has_eeg_coils_and_meg, has_eeg_coils_only,
has_neuromag_122_grad, has_csd_coils) = _get_ch_info(info)
conn_name = None
if has_vv_mag and ch_type == 'mag':
conn_name = 'neuromag306mag'
elif has_vv_grad and ch_type == 'grad':
conn_name = 'neuromag306planar'
elif has_4D_mag:
if 'MEG 248' in info['ch_names']:
idx = info['ch_names'].index('MEG 248')
grad = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_GRAD
mag = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG
if ch_type == 'grad' and grad:
conn_name = 'bti248grad'
elif ch_type == 'mag' and mag:
conn_name = 'bti248'
elif 'MEG 148' in info['ch_names'] and ch_type == 'mag':
idx = info['ch_names'].index('MEG 148')
if info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG:
conn_name = 'bti148'
elif has_CTF_grad and ch_type == 'mag':
if info['nchan'] < 100:
conn_name = 'ctf64'
elif info['nchan'] > 200:
conn_name = 'ctf275'
else:
conn_name = 'ctf151'
elif n_kit_grads > 0:
from ..io.kit.constants import KIT_NEIGHBORS
conn_name = KIT_NEIGHBORS.get(info['kit_system_id'])
if conn_name is not None:
logger.info('Reading adjacency matrix for %s.' % conn_name)
return read_ch_adjacency(conn_name)
logger.info('Could not find an adjacency matrix for the data. '
'Computing adjacency based on Delaunay triangulations.')
return _compute_ch_adjacency(info, ch_type)
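# --- Usage sketch (illustrative addition, not upstream code) ---
# Exercises the Delaunay fallback described above for EEG data; the montage
# and channel names are arbitrary and only serve to provide 2d positions.
def _demo_find_ch_adjacency():
    """Sketch: Delaunay-based EEG adjacency from 2d sensor positions."""
    import numpy as np
    from mne import create_info
    from mne.io import RawArray
    from mne.channels import make_standard_montage
    ch_names = ['Fz', 'Cz', 'Pz', 'C3', 'C4', 'O1', 'O2']
    raw = RawArray(np.zeros((len(ch_names), 10)),
                   create_info(ch_names, sfreq=100., ch_types='eeg'))
    raw.set_montage(make_standard_montage('standard_1020'))
    adjacency, names = find_ch_adjacency(raw.info, ch_type='eeg')
    print(adjacency.shape, names)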
@fill_doc
def _compute_ch_adjacency(info, ch_type):
"""Compute channel adjacency matrix using Delaunay triangulations.
Parameters
----------
%(info_not_none)s
ch_type : str
The channel type for computing the adjacency matrix. Currently
supports 'mag', 'grad' and 'eeg'.
Returns
-------
ch_adjacency : scipy.sparse matrix, shape (n_channels, n_channels)
The adjacency matrix.
ch_names : list
The list of channel names present in adjacency matrix.
"""
from scipy import sparse
from scipy.spatial import Delaunay
from .. import spatial_tris_adjacency
from ..channels.layout import _find_topomap_coords, _pair_grad_sensors
combine_grads = (ch_type == 'grad'
and any([coil_type in [ch['coil_type']
for ch in info['chs']]
for coil_type in
[FIFF.FIFFV_COIL_VV_PLANAR_T1,
FIFF.FIFFV_COIL_NM_122]]))
picks = dict(_picks_by_type(info, exclude=[]))[ch_type]
ch_names = [info['ch_names'][pick] for pick in picks]
if combine_grads:
pairs = _pair_grad_sensors(info, topomap_coords=False, exclude=[])
if len(pairs) != len(picks):
raise RuntimeError('Cannot find a pair for some of the '
'gradiometers. Cannot compute adjacency '
'matrix.')
# only for one of the pair
xy = _find_topomap_coords(info, picks[::2], sphere=HEAD_SIZE_DEFAULT)
else:
xy = _find_topomap_coords(info, picks, sphere=HEAD_SIZE_DEFAULT)
tri = Delaunay(xy)
neighbors = spatial_tris_adjacency(tri.simplices)
if combine_grads:
ch_adjacency = np.eye(len(picks), dtype=bool)
for idx, neigbs in zip(neighbors.row, neighbors.col):
for ii in range(2): # make sure each pair is included
for jj in range(2):
ch_adjacency[idx * 2 + ii, neigbs * 2 + jj] = True
ch_adjacency[idx * 2 + ii, idx * 2 + jj] = True # pair
ch_adjacency = sparse.csr_matrix(ch_adjacency)
else:
ch_adjacency = sparse.lil_matrix(neighbors)
ch_adjacency.setdiag(np.repeat(1, ch_adjacency.shape[0]))
ch_adjacency = ch_adjacency.tocsr()
return ch_adjacency, ch_names
@fill_doc
def fix_mag_coil_types(info, use_cal=False):
"""Fix magnetometer coil types.
Parameters
----------
%(info_not_none)s Corrections are done in-place.
use_cal : bool
If True, further refine the check for old coil types by checking
``info['chs'][ii]['cal']``.
Notes
-----
This function changes magnetometer coil types 3022 (T1: SQ20483N) and
3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition
records in the info structure.
Neuromag Vectorview systems can contain magnetometers with two
different coil sizes (3022 and 3023 vs. 3024). The systems
incorporating coils of type 3024 were introduced last and are used at
the majority of MEG sites. At some sites with 3024 magnetometers,
the data files still define the magnetometers to be of type
3022 to ensure compatibility with older versions of Neuromag software.
In the MNE software as well as in the present version of Neuromag
software coil type 3024 is fully supported. Therefore, it is now safe
to upgrade the data files to use the true coil type.
.. note:: The effect of the difference between the coil sizes on the
current estimates computed by the MNE software is very small.
Therefore the use of ``fix_mag_coil_types`` is not mandatory.
"""
old_mag_inds = _get_T1T2_mag_inds(info, use_cal)
for ii in old_mag_inds:
info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T3
logger.info('%d of %d magnetometer types replaced with T3.' %
(len(old_mag_inds), len(pick_types(info, meg='mag'))))
info._check_consistency()
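# --- Usage sketch (illustrative addition, not upstream code) ---
# ``fname`` is a placeholder path to a Neuromag/Vectorview FIF file; the call
# itself simply rewrites old T1/T2 magnetometer coil codes in the info in place.
def _demo_fix_mag_coil_types(fname):
    """Sketch: upgrade legacy magnetometer coil types of a loaded recording."""
    from mne.io import read_raw_fif
    raw = read_raw_fif(fname, preload=False)
    fix_mag_coil_types(raw.info)   # 3022/3023 (T1/T2) -> 3024 (T3)
    return raw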
def _get_T1T2_mag_inds(info, use_cal=False):
"""Find T1/T2 magnetometer coil types."""
picks = pick_types(info, meg='mag')
old_mag_inds = []
# From email exchanges, systems with the larger T2 coil only use the cal
# value of 2.09e-11. Newer T3 magnetometers use 4.13e-11 or 1.33e-10
# (Triux). So we can use a simple check for > 3e-11.
for ii in picks:
ch = info['chs'][ii]
if ch['coil_type'] in (FIFF.FIFFV_COIL_VV_MAG_T1,
FIFF.FIFFV_COIL_VV_MAG_T2):
if use_cal:
if ch['cal'] > 3e-11:
old_mag_inds.append(ii)
else:
old_mag_inds.append(ii)
return old_mag_inds
def _get_ch_info(info):
"""Get channel info for inferring acquisition device."""
chs = info['chs']
# Only take first 16 bits, as higher bits store CTF comp order
coil_types = {ch['coil_type'] & 0xFFFF for ch in chs}
channel_types = {ch['kind'] for ch in chs}
has_vv_mag = any(k in coil_types for k in
[FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2,
FIFF.FIFFV_COIL_VV_MAG_T3])
has_vv_grad = any(k in coil_types for k in [FIFF.FIFFV_COIL_VV_PLANAR_T1,
FIFF.FIFFV_COIL_VV_PLANAR_T2,
FIFF.FIFFV_COIL_VV_PLANAR_T3])
has_neuromag_122_grad = any(k in coil_types
for k in [FIFF.FIFFV_COIL_NM_122])
is_old_vv = ' ' in chs[0]['ch_name']
has_4D_mag = FIFF.FIFFV_COIL_MAGNES_MAG in coil_types
ctf_other_types = (FIFF.FIFFV_COIL_CTF_REF_MAG,
FIFF.FIFFV_COIL_CTF_REF_GRAD,
FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD)
has_CTF_grad = (FIFF.FIFFV_COIL_CTF_GRAD in coil_types or
(FIFF.FIFFV_MEG_CH in channel_types and
any(k in ctf_other_types for k in coil_types)))
# hack due to MNE-C bug in IO of CTF
# only take first 16 bits, as higher bits store CTF comp order
n_kit_grads = sum(ch['coil_type'] & 0xFFFF == FIFF.FIFFV_COIL_KIT_GRAD
for ch in chs)
has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad,
n_kit_grads])
has_eeg_coils = (FIFF.FIFFV_COIL_EEG in coil_types and
FIFF.FIFFV_EEG_CH in channel_types)
has_eeg_coils_and_meg = has_eeg_coils and has_any_meg
has_eeg_coils_only = has_eeg_coils and not has_any_meg
has_csd_coils = (FIFF.FIFFV_COIL_EEG_CSD in coil_types and
FIFF.FIFFV_EEG_CH in channel_types)
return (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types,
has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils,
has_eeg_coils_and_meg, has_eeg_coils_only, has_neuromag_122_grad,
has_csd_coils)
@fill_doc
def make_1020_channel_selections(info, midline="z"):
"""Return dict mapping from ROI names to lists of picks for 10/20 setups.
This passes through all channel names, and uses a simple heuristic to
separate channel names into three Region of Interest-based selections:
Left, Midline and Right. The heuristic is that channels ending on any of
the characters in ``midline`` are filed under that heading, otherwise those
ending in odd numbers under "Left", those in even numbers under "Right".
Other channels are ignored. This is appropriate for 10/20 files, but not
for other channel naming conventions.
If an info object is provided, lists are sorted from posterior to anterior.
Parameters
----------
%(info_not_none)s If possible, the channel lists will be sorted
posterior-to-anterior; otherwise they default to the order specified in
``info["ch_names"]``.
midline : str
Names ending in any of these characters are stored under the
``Midline`` key. Defaults to 'z'. Note that capitalization is ignored.
Returns
-------
selections : dict
A dictionary mapping from ROI names to lists of picks (integers).
"""
_validate_type(info, "info")
try:
from .layout import find_layout
layout = find_layout(info)
pos = layout.pos
ch_names = layout.names
except RuntimeError: # no channel positions found
ch_names = info["ch_names"]
pos = None
selections = dict(Left=[], Midline=[], Right=[])
for pick, channel in enumerate(ch_names):
last_char = channel[-1].lower() # in 10/20, last char codes hemisphere
if last_char in midline:
selection = "Midline"
elif last_char.isdigit():
selection = "Left" if int(last_char) % 2 else "Right"
else: # ignore the channel
continue
selections[selection].append(pick)
if pos is not None:
# sort channels from front to center
# (y-coordinate of the position info in the layout)
selections = {selection: np.array(picks)[pos[picks, 1].argsort()]
for selection, picks in selections.items()}
return selections
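# --- Usage sketch (illustrative addition, not upstream code) ---
# Splits a handful of 10/20 channel names into Left/Midline/Right picks; the
# montage is set only so the posterior-to-anterior sorting described above can
# take effect.
def _demo_make_1020_channel_selections():
    """Sketch: split 10/20 channel names into Left/Midline/Right picks."""
    import numpy as np
    from mne import create_info
    from mne.io import RawArray
    from mne.channels import make_standard_montage
    ch_names = ['Fp1', 'Fp2', 'Fz', 'C3', 'C4', 'Cz', 'O1', 'O2']
    raw = RawArray(np.zeros((len(ch_names), 10)),
                   create_info(ch_names, sfreq=100., ch_types='eeg'))
    raw.set_montage(make_standard_montage('standard_1020'))
    selections = make_1020_channel_selections(raw.info)
    print(selections)   # dict with 'Left', 'Midline' and 'Right' pick arrays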
def combine_channels(inst, groups, method='mean', keep_stim=False,
drop_bad=False):
"""Combine channels based on specified channel grouping.
Parameters
----------
inst : instance of Raw, Epochs, or Evoked
An MNE-Python object to combine the channels for. The object can be of
type Raw, Epochs, or Evoked.
groups : dict
Specifies which channels are aggregated into a single channel, with
aggregation method determined by the ``method`` parameter. One new
pseudo-channel is made per dict entry; the dict values must be lists of
picks (integer indices of ``ch_names``). For example::
groups=dict(Left=[1, 2, 3, 4], Right=[5, 6, 7, 8])
Note that within a dict entry all channels must have the same type.
method : str | callable
Which method to use to combine channels. If a :class:`str`, must be one
of 'mean', 'median', or 'std' (standard deviation). If callable, the
callable must accept one positional input (data of shape ``(n_channels,
n_times)``, or ``(n_epochs, n_channels, n_times)``) and return an
:class:`array <numpy.ndarray>` of shape ``(n_times,)``, or ``(n_epochs,
n_times)``. For example with an instance of Raw or Evoked::
method = lambda data: np.mean(data, axis=0)
Another example with an instance of Epochs::
method = lambda data: np.median(data, axis=1)
Defaults to ``'mean'``.
keep_stim : bool
If ``True``, include stimulus channels in the resulting object.
Defaults to ``False``.
drop_bad : bool
If ``True``, drop channels marked as bad before combining. Defaults to
``False``.
Returns
-------
combined_inst : instance of Raw, Epochs, or Evoked
An MNE-Python object of the same type as the input ``inst``, containing
one virtual channel for each group in ``groups`` (and, if ``keep_stim``
is ``True``, also containing stimulus channels).
"""
from ..io import BaseRaw, RawArray
from .. import BaseEpochs, EpochsArray, Evoked, EvokedArray
ch_axis = 1 if isinstance(inst, BaseEpochs) else 0
ch_idx = list(range(inst.info['nchan']))
ch_names = inst.info['ch_names']
ch_types = inst.get_channel_types()
inst_data = inst.data if isinstance(inst, Evoked) else inst.get_data()
groups = OrderedDict(deepcopy(groups))
# Convert string values of ``method`` into callables
# XXX Possibly de-duplicate with _make_combine_callable of mne/viz/utils.py
if isinstance(method, str):
method_dict = {key: partial(getattr(np, key), axis=ch_axis)
for key in ('mean', 'median', 'std')}
try:
method = method_dict[method]
except KeyError:
raise ValueError('"method" must be a callable, or one of "mean", '
f'"median", or "std"; got "{method}".')
# Instantiate channel info and data
new_ch_names, new_ch_types, new_data = [], [], []
if not isinstance(keep_stim, bool):
raise TypeError('"keep_stim" must be of type bool, not '
f'{type(keep_stim)}.')
if keep_stim:
stim_ch_idx = list(pick_types(inst.info, meg=False, stim=True))
if stim_ch_idx:
new_ch_names = [ch_names[idx] for idx in stim_ch_idx]
new_ch_types = [ch_types[idx] for idx in stim_ch_idx]
new_data = [np.take(inst_data, idx, axis=ch_axis)
for idx in stim_ch_idx]
else:
warn('Could not find stimulus channels.')
# Get indices of bad channels
ch_idx_bad = []
if not isinstance(drop_bad, bool):
raise TypeError('"drop_bad" must be of type bool, not '
f'{type(drop_bad)}.')
if drop_bad and inst.info['bads']:
ch_idx_bad = pick_channels(ch_names, inst.info['bads'])
# Check correctness of combinations
for this_group, this_picks in groups.items():
# Check if channel indices are out of bounds
if not all(idx in ch_idx for idx in this_picks):
raise ValueError('Some channel indices are out of bounds.')
# Check if heterogeneous sensor type combinations
this_ch_type = np.array(ch_types)[this_picks]
if len(set(this_ch_type)) > 1:
types = ', '.join(set(this_ch_type))
raise ValueError('Cannot combine sensors of different types; '
f'"{this_group}" contains types {types}.')
# Remove bad channels
these_bads = [idx for idx in this_picks if idx in ch_idx_bad]
this_picks = [idx for idx in this_picks if idx not in ch_idx_bad]
if these_bads:
logger.info('Dropped the following channels in group '
f'{this_group}: {these_bads}')
# Check if combining fewer than 2 channels
if len(set(this_picks)) < 2:
warn(f'Less than 2 channels in group "{this_group}" when '
f'combining by method "{method}".')
# If all good create more detailed dict without bad channels
groups[this_group] = dict(picks=this_picks, ch_type=this_ch_type[0])
# Combine channels and add them to the new instance
for this_group, this_group_dict in groups.items():
new_ch_names.append(this_group)
new_ch_types.append(this_group_dict['ch_type'])
this_picks = this_group_dict['picks']
this_data = np.take(inst_data, this_picks, axis=ch_axis)
new_data.append(method(this_data))
new_data = np.swapaxes(new_data, 0, ch_axis)
info = create_info(sfreq=inst.info['sfreq'], ch_names=new_ch_names,
ch_types=new_ch_types)
if isinstance(inst, BaseRaw):
combined_inst = RawArray(new_data, info, first_samp=inst.first_samp,
verbose=inst.verbose)
elif isinstance(inst, BaseEpochs):
combined_inst = EpochsArray(new_data, info, events=inst.events,
tmin=inst.times[0], verbose=inst.verbose)
elif isinstance(inst, Evoked):
combined_inst = EvokedArray(new_data, info, tmin=inst.times[0],
verbose=inst.verbose)
return combined_inst
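# --- Usage sketch (illustrative addition, not upstream code) ---
# Averages two arbitrary channel groups of a synthetic Raw into one virtual
# channel each, following the ``groups``/``method`` contract documented above.
def _demo_combine_channels():
    """Sketch: average channel groups into one virtual channel per group."""
    import numpy as np
    from mne import create_info
    from mne.io import RawArray
    info = create_info(['EEG 001', 'EEG 002', 'EEG 003', 'EEG 004'],
                       sfreq=100., ch_types='eeg')
    raw = RawArray(np.random.RandomState(0).randn(4, 300), info)
    roi_raw = combine_channels(raw, groups=dict(Front=[0, 1], Back=[2, 3]),
                               method='mean')
    print(roi_raw.ch_names)   # -> ['Front', 'Back']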
# NeuroMag channel groupings
_SELECTIONS = ['Vertex', 'Left-temporal', 'Right-temporal', 'Left-parietal',
'Right-parietal', 'Left-occipital', 'Right-occipital',
'Left-frontal', 'Right-frontal']
_EEG_SELECTIONS = ['EEG 1-32', 'EEG 33-64', 'EEG 65-96', 'EEG 97-128']
def _divide_to_regions(info, add_stim=True):
"""Divide channels to regions by positions."""
from scipy.stats import zscore
picks = _pick_data_channels(info, exclude=[])
chs_in_lobe = len(picks) // 4
pos = np.array([ch['loc'][:3] for ch in info['chs']])
x, y, z = pos.T
frontal = picks[np.argsort(y[picks])[-chs_in_lobe:]]
picks = np.setdiff1d(picks, frontal)
occipital = picks[np.argsort(y[picks])[:chs_in_lobe]]
picks = np.setdiff1d(picks, occipital)
temporal = picks[np.argsort(z[picks])[:chs_in_lobe]]
picks = np.setdiff1d(picks, temporal)
lt, rt = _divide_side(temporal, x)
lf, rf = _divide_side(frontal, x)
lo, ro = _divide_side(occipital, x)
lp, rp = _divide_side(picks, x) # Parietal lobe from the remaining picks.
# Because of the way the sides are divided, there may be outliers in the
# temporal lobes. Here we switch the sides for these outliers. For other
# lobes it is not a big problem because of the vicinity of the lobes.
with np.errstate(invalid='ignore'): # invalid division, greater compare
zs = np.abs(zscore(x[rt]))
outliers = np.array(rt)[np.where(zs > 2.)[0]]
rt = list(np.setdiff1d(rt, outliers))
with np.errstate(invalid='ignore'): # invalid division, greater compare
zs = np.abs(zscore(x[lt]))
outliers = np.append(outliers, (np.array(lt)[np.where(zs > 2.)[0]]))
lt = list(np.setdiff1d(lt, outliers))
l_mean = np.mean(x[lt])
r_mean = np.mean(x[rt])
for outlier in outliers:
if abs(l_mean - x[outlier]) < abs(r_mean - x[outlier]):
lt.append(outlier)
else:
rt.append(outlier)
if add_stim:
stim_ch = _get_stim_channel(None, info, raise_error=False)
if len(stim_ch) > 0:
for region in [lf, rf, lo, ro, lp, rp, lt, rt]:
region.append(info['ch_names'].index(stim_ch[0]))
return OrderedDict([('Left-frontal', lf), ('Right-frontal', rf),
('Left-parietal', lp), ('Right-parietal', rp),
('Left-occipital', lo), ('Right-occipital', ro),
('Left-temporal', lt), ('Right-temporal', rt)])
def _divide_side(lobe, x):
"""Make a separation between left and right lobe evenly."""
lobe = np.asarray(lobe)
median = np.median(x[lobe])
left = lobe[np.where(x[lobe] < median)[0]]
right = lobe[np.where(x[lobe] > median)[0]]
medians = np.where(x[lobe] == median)[0]
left = np.sort(np.concatenate([left, lobe[medians[1::2]]]))
right = np.sort(np.concatenate([right, lobe[medians[::2]]]))
return list(left), list(right)
@verbose
def read_vectorview_selection(name, fname=None, info=None, verbose=None):
"""Read Neuromag Vector View channel selection from a file.
Parameters
----------
name : str | list of str
Name of the selection. If a list, the selections are combined.
Supported selections are: ``'Vertex'``, ``'Left-temporal'``,
``'Right-temporal'``, ``'Left-parietal'``, ``'Right-parietal'``,
``'Left-occipital'``, ``'Right-occipital'``, ``'Left-frontal'`` and
``'Right-frontal'``. Selections can also be matched and combined by
specifying common substrings. For example, ``name='temporal'`` will
produce a combination of ``'Left-temporal'`` and ``'Right-temporal'``.
fname : str
Filename of the selection file (if ``None``, built-in selections are
used).
%(info)s Used to determine which channel naming convention to use, e.g.
``'MEG 0111'`` (with space) for old Neuromag systems and ``'MEG0111'``
(without space) for new ones.
%(verbose)s
Returns
-------
sel : list of str
List with channel names in the selection.
"""
# convert name to list of string
if not isinstance(name, (list, tuple)):
name = [name]
if isinstance(info, Info):
picks = pick_types(info, meg=True, exclude=())
if len(picks) > 0 and ' ' not in info['ch_names'][picks[0]]:
spacing = 'new'
else:
spacing = 'old'
elif info is not None:
raise TypeError('info must be an instance of Info or None, not %s'
% (type(info),))
else: # info is None
spacing = 'old'
# use built-in selections by default
if fname is None:
fname = op.join(op.dirname(__file__), '..', 'data', 'mne_analyze.sel')
fname = _check_fname(fname, must_exist=True, overwrite='read')
# use this to make sure we find at least one match for each name
name_found = {n: False for n in name}
with open(fname, 'r') as fid:
sel = []
for line in fid:
line = line.strip()
# skip blank lines and comments
if len(line) == 0 or line[0] == '#':
continue
# get the name of the selection in the file
pos = line.find(':')
if pos < 0:
logger.info('":" delimiter not found in selections file, '
'skipping line')
continue
sel_name_file = line[:pos]
# search for substring match with name provided
for n in name:
if sel_name_file.find(n) >= 0:
sel.extend(line[pos + 1:].split('|'))
name_found[n] = True
break
# make sure we found at least one match for each name
for n, found in name_found.items():
if not found:
raise ValueError('No match for selection name "%s" found' % n)
# make the selection a sorted list with unique elements
sel = list(set(sel))
sel.sort()
if spacing == 'new': # "new" or "old" by now, "old" is default
sel = [s.replace('MEG ', 'MEG') for s in sel]
return sel
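# --- Usage sketch (illustrative addition, not upstream code) ---
# Reads the built-in Vectorview selections; a substring such as 'temporal'
# combines 'Left-temporal' and 'Right-temporal' as documented above.
def _demo_read_vectorview_selection():
    """Sketch: combine both temporal selections using a substring match."""
    sel = read_vectorview_selection('temporal')   # info=None -> old-style names
    print(len(sel), sel[:3])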
|
bloyl/mne-python
|
mne/channels/channels.py
|
Python
|
bsd-3-clause
| 75,094
|
[
"Mayavi"
] |
57201032fc8ad8c42ed0e5ccbad623ae42e8df19ab15b6c364d4eab27917d367
|
import unittest
import numpy as np
import pysal
from pysal.spreg.twosls import TSLS, BaseTSLS
from scipy import sparse as SP
from pysal.common import RTOL
class TestBaseTSLS(unittest.TestCase):
def setUp(self):
db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')
self.y = np.array(db.by_col("CRIME"))
self.y = np.reshape(self.y, (49,1))
self.X = []
self.X.append(db.by_col("INC"))
self.X = np.array(self.X).T
self.X = np.hstack((np.ones(self.y.shape),self.X))
self.X = SP.csr_matrix(self.X)
self.yd = []
self.yd.append(db.by_col("HOVAL"))
self.yd = np.array(self.yd).T
self.q = []
self.q.append(db.by_col("DISCBD"))
self.q = np.array(self.q).T
def test_basic(self):
reg = BaseTSLS(self.y, self.X, self.yd, self.q)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
h_0 = np.array([ 1. , 19.531, 5.03 ])
np.testing.assert_allclose(reg.h.toarray()[0], h_0)
hth = np.array([[ 49. , 704.371999 , 139.75 ],
[ 704.371999 , 11686.67338121, 2246.12800625],
[ 139.75 , 2246.12800625, 498.5851 ]])
np.testing.assert_allclose(reg.hth, hth,RTOL)
hthi = np.array([[ 0.1597275 , -0.00762011, -0.01044191],
[-0.00762011, 0.00100135, -0.0023752 ],
[-0.01044191, -0.0023752 , 0.01563276]])
np.testing.assert_allclose(reg.hthi, hthi,RTOL)
self.assertEqual(reg.k, 3)
self.assertEqual(reg.kstar, 1)
np.testing.assert_allclose(reg.mean_y, 35.128823897959187,RTOL)
self.assertEqual(reg.n, 49)
pfora1a2 = np.array([[ 9.58156106, -0.22744226, -0.13820537],
[ 0.02580142, 0.08226331, -0.03143731],
[-3.13896453, -0.33487872, 0.20690965]])
np.testing.assert_allclose(reg.pfora1a2, pfora1a2,RTOL)
predy_5 = np.array([[-28.68949467], [ 28.99484984], [ 55.07344824], [ 38.26609504], [ 57.57145851]])
np.testing.assert_allclose(reg.predy[0:5], predy_5,RTOL)
q_5 = np.array([[ 5.03], [ 4.27], [ 3.89], [ 3.7 ], [ 2.83]])
np.testing.assert_array_equal(reg.q[0:5], q_5)
np.testing.assert_allclose(reg.sig2n_k, 587.56797852699822,RTOL)
np.testing.assert_allclose(reg.sig2n, 551.5944288212637,RTOL)
np.testing.assert_allclose(reg.sig2, 551.5944288212637,RTOL)
np.testing.assert_allclose(reg.std_y, 16.732092091229699,RTOL)
u_5 = np.array([[ 44.41547467], [-10.19309584], [-24.44666724], [ -5.87833504], [ -6.83994851]])
np.testing.assert_allclose(reg.u[0:5], u_5,RTOL)
np.testing.assert_allclose(reg.utu, 27028.127012241919,RTOL)
varb = np.array([[ 0.41526237, 0.01879906, -0.01730372],
[ 0.01879906, 0.00362823, -0.00184604],
[-0.01730372, -0.00184604, 0.0011406 ]])
np.testing.assert_allclose(reg.varb, varb,RTOL)
vm = np.array([[ 229.05640809, 10.36945783, -9.54463414],
[ 10.36945783, 2.0013142 , -1.01826408],
[ -9.54463414, -1.01826408, 0.62914915]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
x_0 = np.array([ 1. , 19.531])
np.testing.assert_allclose(reg.x.toarray()[0], x_0,RTOL)
y_5 = np.array([[ 15.72598 ], [ 18.801754], [ 30.626781], [ 32.38776 ], [ 50.73151 ]])
np.testing.assert_allclose(reg.y[0:5], y_5,RTOL)
yend_5 = np.array([[ 80.467003], [ 44.567001], [ 26.35 ], [ 33.200001], [ 23.225 ]])
np.testing.assert_allclose(reg.yend[0:5], yend_5,RTOL)
z_0 = np.array([ 1. , 19.531 , 80.467003])
np.testing.assert_allclose(reg.z.toarray()[0], z_0,RTOL)
zthhthi = np.array([[ 1.00000000e+00, -1.66533454e-16, 4.44089210e-16],
[ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00],
[ 1.26978671e+01, 1.05598709e+00, 3.70212359e+00]])
# np.testing.assert_allclose(reg.zthhthi, zthhthi,RTOL)
np.testing.assert_array_almost_equal(reg.zthhthi, zthhthi, 7)
def test_n_k(self):
reg = BaseTSLS(self.y, self.X, self.yd, self.q, sig2n_k=True)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
vm = np.array([[ 243.99486949, 11.04572682, -10.16711028],
[ 11.04572682, 2.13183469, -1.08467261],
[ -10.16711028, -1.08467261, 0.67018062]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
def test_white(self):
reg = BaseTSLS(self.y, self.X, self.yd, self.q, robust='white')
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
vm = np.array([[ 208.27139316, 15.6687805 , -11.53686154],
[ 15.6687805 , 2.26882747, -1.30312033],
[ -11.53686154, -1.30312033, 0.81940656]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
def test_hac(self):
gwk = pysal.kernelW_from_shapefile(pysal.examples.get_path('columbus.shp'),k=15,function='triangular', fixed=False)
reg = BaseTSLS(self.y, self.X, self.yd, self.q, robust='hac', gwk=gwk)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
vm = np.array([[ 231.07254978, 15.42050291, -11.3941033 ],
[ 15.01376346, 1.92422887, -1.11865505],
[ -11.34381641, -1.1279227 , 0.72053806]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
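# --- Usage sketch (illustrative addition, not part of the original tests) ---
# The same columbus example the tests use, run directly through TSLS rather
# than through unittest; it relies on the np/pysal/TSLS imports at the top of
# this file, and the variable names mirror the setUp methods above.
def _demo_tsls_columbus():
    """Sketch: two-stage least squares on the bundled columbus data."""
    db = pysal.open(pysal.examples.get_path("columbus.dbf"), 'r')
    y = np.array(db.by_col("CRIME")).reshape((49, 1))
    X = np.array([db.by_col("INC")]).T       # exogenous regressor
    yd = np.array([db.by_col("HOVAL")]).T    # endogenous regressor
    q = np.array([db.by_col("DISCBD")]).T    # instrument
    reg = TSLS(y, X, yd, q, name_y='crime', name_x=['inc'],
               name_yend=['hoval'], name_q=['discbd'], name_ds='columbus')
    print(reg.betas)    # should match the `betas` arrays asserted above
    print(reg.z_stat)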
class TestTSLS(unittest.TestCase):
def setUp(self):
db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')
self.y = np.array(db.by_col("CRIME"))
self.y = np.reshape(self.y, (49,1))
self.X = []
self.X.append(db.by_col("INC"))
self.X = np.array(self.X).T
self.X = SP.csr_matrix(self.X)
self.yd = []
self.yd.append(db.by_col("HOVAL"))
self.yd = np.array(self.yd).T
self.q = []
self.q.append(db.by_col("DISCBD"))
self.q = np.array(self.q).T
def test_basic(self):
reg = TSLS(self.y, self.X, self.yd, self.q)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
h_0 = np.array([ 1. , 19.531, 5.03 ])
np.testing.assert_allclose(reg.h.toarray()[0], h_0)
hth = np.array([[ 49. , 704.371999 , 139.75 ],
[ 704.371999 , 11686.67338121, 2246.12800625],
[ 139.75 , 2246.12800625, 498.5851 ]])
np.testing.assert_allclose(reg.hth, hth,RTOL)
hthi = np.array([[ 0.1597275 , -0.00762011, -0.01044191],
[-0.00762011, 0.00100135, -0.0023752 ],
[-0.01044191, -0.0023752 , 0.01563276]])
np.testing.assert_allclose(reg.hthi, hthi,RTOL)
self.assertEqual(reg.k, 3)
self.assertEqual(reg.kstar, 1)
np.testing.assert_allclose(reg.mean_y, 35.128823897959187,RTOL)
self.assertEqual(reg.n, 49)
pfora1a2 = np.array([[ 9.58156106, -0.22744226, -0.13820537],
[ 0.02580142, 0.08226331, -0.03143731],
[-3.13896453, -0.33487872, 0.20690965]])
np.testing.assert_allclose(reg.pfora1a2, pfora1a2,RTOL)
predy_5 = np.array([[-28.68949467], [ 28.99484984], [ 55.07344824], [ 38.26609504], [ 57.57145851]])
np.testing.assert_allclose(reg.predy[0:5], predy_5,RTOL)
q_5 = np.array([[ 5.03], [ 4.27], [ 3.89], [ 3.7 ], [ 2.83]])
np.testing.assert_array_equal(reg.q[0:5], q_5)
np.testing.assert_allclose(reg.sig2n_k, 587.56797852699822,RTOL)
np.testing.assert_allclose(reg.sig2n, 551.5944288212637,RTOL)
np.testing.assert_allclose(reg.sig2, 551.5944288212637,RTOL)
np.testing.assert_allclose(reg.std_y, 16.732092091229699,RTOL)
u_5 = np.array([[ 44.41547467], [-10.19309584], [-24.44666724], [ -5.87833504], [ -6.83994851]])
np.testing.assert_allclose(reg.u[0:5], u_5,RTOL)
np.testing.assert_allclose(reg.utu, 27028.127012241919,RTOL)
varb = np.array([[ 0.41526237, 0.01879906, -0.01730372],
[ 0.01879906, 0.00362823, -0.00184604],
[-0.01730372, -0.00184604, 0.0011406 ]])
np.testing.assert_allclose(reg.varb, varb,RTOL)
vm = np.array([[ 229.05640809, 10.36945783, -9.54463414],
[ 10.36945783, 2.0013142 , -1.01826408],
[ -9.54463414, -1.01826408, 0.62914915]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
x_0 = np.array([ 1. , 19.531])
np.testing.assert_allclose(reg.x.toarray()[0], x_0,RTOL)
y_5 = np.array([[ 15.72598 ], [ 18.801754], [ 30.626781], [ 32.38776 ], [ 50.73151 ]])
np.testing.assert_allclose(reg.y[0:5], y_5,RTOL)
yend_5 = np.array([[ 80.467003], [ 44.567001], [ 26.35 ], [ 33.200001], [ 23.225 ]])
np.testing.assert_allclose(reg.yend[0:5], yend_5,RTOL)
z_0 = np.array([ 1. , 19.531 , 80.467003])
np.testing.assert_allclose(reg.z.toarray()[0], z_0,RTOL)
zthhthi = np.array([[ 1.00000000e+00, -1.66533454e-16, 4.44089210e-16],
[ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00],
[ 1.26978671e+01, 1.05598709e+00, 3.70212359e+00]])
# np.testing.assert_allclose(reg.zthhthi, zthhthi,RTOL)
np.testing.assert_array_almost_equal(reg.zthhthi, zthhthi,7)
np.testing.assert_allclose(reg.pr2, 0.27936137128173893,RTOL)
z_stat = np.array([[ 5.84526447e+00, 5.05764078e-09],
[ 3.67601567e-01, 7.13170346e-01],
[ -1.99468913e+00, 4.60767956e-02]])
np.testing.assert_allclose(reg.z_stat, z_stat,RTOL)
title = 'TWO STAGE LEAST SQUARES'
self.assertEqual(reg.title, title)
def test_n_k(self):
reg = TSLS(self.y, self.X, self.yd, self.q, sig2n_k=True)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
vm = np.array([[ 243.99486949, 11.04572682, -10.16711028],
[ 11.04572682, 2.13183469, -1.08467261],
[ -10.16711028, -1.08467261, 0.67018062]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
def test_white(self):
reg = TSLS(self.y, self.X, self.yd, self.q, robust='white')
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
vm = np.array([[ 208.27139316, 15.6687805 , -11.53686154],
[ 15.6687805 , 2.26882747, -1.30312033],
[ -11.53686154, -1.30312033, 0.81940656]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
self.assertEqual(reg.robust, 'white')
def test_hac(self):
gwk = pysal.kernelW_from_shapefile(pysal.examples.get_path('columbus.shp'),k=5,function='triangular', fixed=False)
reg = TSLS(self.y, self.X, self.yd, self.q, robust='hac', gwk=gwk)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
vm = np.array([[ 225.0795089 , 17.11660041, -12.22448566],
[ 17.67097154, 2.47483461, -1.4183641 ],
[ -12.45093722, -1.40495464, 0.8700441 ]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
self.assertEqual(reg.robust, 'hac')
def test_spatial(self):
w = pysal.queen_from_shapefile(pysal.examples.get_path('columbus.shp'))
reg = TSLS(self.y, self.X, self.yd, self.q, spat_diag=True, w=w)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
vm = np.array([[ 229.05640809, 10.36945783, -9.54463414],
[ 10.36945783, 2.0013142 , -1.01826408],
[ -9.54463414, -1.01826408, 0.62914915]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
ak_test = np.array([ 1.16816972, 0.27977763])
np.testing.assert_allclose(reg.ak_test, ak_test,RTOL)
def test_names(self):
w = pysal.queen_from_shapefile(pysal.examples.get_path('columbus.shp'))
gwk = pysal.kernelW_from_shapefile(pysal.examples.get_path('columbus.shp'),k=5,function='triangular', fixed=False)
name_x = ['inc']
name_y = 'crime'
name_yend = ['hoval']
name_q = ['discbd']
name_w = 'queen'
name_gwk = 'k=5'
name_ds = 'columbus'
reg = TSLS(self.y, self.X, self.yd, self.q,
spat_diag=True, w=w, robust='hac', gwk=gwk,
name_x=name_x, name_y=name_y, name_q=name_q, name_w=name_w,
name_yend=name_yend, name_gwk=name_gwk, name_ds=name_ds)
betas = np.array([[ 88.46579584], [ 0.5200379 ], [ -1.58216593]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
vm = np.array([[ 225.0795089 , 17.11660041, -12.22448566],
[ 17.67097154, 2.47483461, -1.4183641 ],
[ -12.45093722, -1.40495464, 0.8700441 ]])
np.testing.assert_allclose(reg.vm, vm,RTOL)
self.assertListEqual(reg.name_x, ['CONSTANT']+name_x)
self.assertListEqual(reg.name_yend, name_yend)
self.assertListEqual(reg.name_q, name_q)
self.assertEqual(reg.name_y, name_y)
self.assertEqual(reg.name_w, name_w)
self.assertEqual(reg.name_gwk, name_gwk)
self.assertEqual(reg.name_ds, name_ds)
if __name__ == '__main__':
unittest.main()
|
ljwolf/pysal
|
pysal/spreg/tests/test_twosls_sparse.py
|
Python
|
bsd-3-clause
| 14,413
|
[
"COLUMBUS"
] |
927b164819dcb4fac349ae474fcbac16c05f96cf4736e4e939979de99a3a042b
|
"""
Perform Levenberg-Marquardt least-squares minimization, based on MINPACK-1.
AUTHORS
The original version of this software, called LMFIT, was written in FORTRAN
as part of the MINPACK-1 package by XXX.
Craig Markwardt converted the FORTRAN code to IDL. The information for the
IDL version is:
Craig B. Markwardt, NASA/GSFC Code 662, Greenbelt, MD 20770
craigm@lheamail.gsfc.nasa.gov
UPDATED VERSIONs can be found on my WEB PAGE:
http://cow.physics.wisc.edu/~craigm/idl/idl.html
Mark Rivers created this Python version from Craig's IDL version.
Mark Rivers, University of Chicago
Building 434A, Argonne National Laboratory
9700 South Cass Avenue, Argonne, IL 60439
rivers@cars.uchicago.edu
Updated versions can be found at http://cars.uchicago.edu/software
Sergey Koposov converted the Mark's Python version from Numeric to numpy
Sergey Koposov, University of Cambridge, Institute of Astronomy,
Madingley road, CB3 0HA, Cambridge, UK
koposov@ast.cam.ac.uk
Updated versions can be found at http://code.google.com/p/astrolibpy/source/browse/trunk/
DESCRIPTION
MPFIT uses the Levenberg-Marquardt technique to solve the
least-squares problem. In its typical use, MPFIT will be used to
fit a user-supplied function (the "model") to user-supplied data
points (the "data") by adjusting a set of parameters. MPFIT is
based upon MINPACK-1 (LMDIF.F) by More' and collaborators.
For example, a researcher may think that a set of observed data
points is best modelled with a Gaussian curve. A Gaussian curve is
parameterized by its mean, standard deviation and normalization.
MPFIT will, within certain constraints, find the set of parameters
which best fits the data. The fit is "best" in the least-squares
sense; that is, the sum of the weighted squared differences between
the model and data is minimized.
The Levenberg-Marquardt technique is a particular strategy for
iteratively searching for the best fit. This particular
implementation is drawn from MINPACK-1 (see NETLIB), and is much faster
and more accurate than the version provided in the Scientific Python package
in Scientific.Functions.LeastSquares.
This version allows upper and lower bounding constraints to be placed on each
parameter, or the parameter can be held fixed.
The user-supplied Python function should return an array of weighted
deviations between model and data. In a typical scientific problem
the residuals should be weighted so that each deviate has a
gaussian sigma of 1.0. If X represents values of the independent
variable, Y represents a measurement for each value of X, and ERR
represents the error in the measurements, then the deviates could
be calculated as follows:
DEVIATES = (Y - F(X)) / ERR
where F is the analytical function representing the model. You are
recommended to use the convenience functions MPFITFUN and
MPFITEXPR, which are driver functions that calculate the deviates
for you. If ERR are the 1-sigma uncertainties in Y, then
TOTAL( DEVIATES^2 )
will be the total chi-squared value. MPFIT will minimize the
chi-square value. The values of X, Y and ERR are passed through
MPFIT to the user-supplied function via the FUNCTKW keyword.
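As a concrete numpy sketch of this weighting (illustrative only; F, X, Y
and ERR are assumed to be defined as described above):
   deviates = (y - F(x, p)) / err        # one weighted deviate per point
   chisq = numpy.sum(deviates**2)        # TOTAL( DEVIATES^2 )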
Simple constraints can be placed on parameter values by using the
PARINFO keyword to MPFIT. See below for a description of this
keyword.
MPFIT does not perform more general optimization tasks. See TNMIN
instead. MPFIT is customized, based on MINPACK-1, to the
least-squares minimization problem.
USER FUNCTION
The user must define a function which returns the appropriate
values as specified above. The function should return the weighted
deviations between the model and the data. It should also return a status
flag and an optional partial derivative array. For applications which
use finite-difference derivatives -- the default -- the user
function should be declared in the following way:
def myfunct(p, fjac=None, x=None, y=None, err=None):
# Parameter values are passed in "p"
# If fjac==None then partial derivatives should not be
# computed. It will always be None if MPFIT is called with default
# flag.
model = F(x, p)
# Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
return([status, (y-model)/err])
See below for applications with analytical derivatives.
The keyword parameters X, Y, and ERR in the example above are
suggestive but not required. Any parameters can be passed to
MYFUNCT by using the functkw keyword to MPFIT. Use MPFITFUN and
MPFITEXPR if you need ideas on how to do that. The function *must*
accept a parameter list, P.
In general there are no restrictions on the number of dimensions in
X, Y or ERR. However the deviates *must* be returned in a
one-dimensional Numeric array of type Float.
User functions may also indicate a fatal error condition using the
status return described above. If status is set to a number between
-15 and -1 then MPFIT will stop the calculation and return to the caller.
ANALYTIC DERIVATIVES
In the search for the best-fit solution, MPFIT by default
calculates derivatives numerically via a finite difference
approximation. The user-supplied function need not calculate the
derivatives explicitly. However, if you desire to compute them
analytically, then the AUTODERIVATIVE=0 keyword must be passed to MPFIT.
As a practical matter, it is often sufficient and even faster to allow
MPFIT to calculate the derivatives numerically, and so
AUTODERIVATIVE=0 is not necessary.
If AUTODERIVATIVE=0 is used then the user function must check the parameter
FJAC, and if FJAC!=None then return the partial derivative array in the
return list.
def myfunct(p, fjac=None, x=None, y=None, err=None):
# Parameter values are passed in "p"
# If FJAC!=None then partial derivatives must be computed.
# FJAC contains an array of len(p), where each entry
# is 1 if that parameter is free and 0 if it is fixed.
model = F(x, p)
# Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
if fjac is not None:
pderiv = zeros([len(x), len(p)], dtype=float)
for j in range(len(p)):
pderiv[:,j] = FGRAD(x, p, j)
else:
pderiv = None
return([status, (y-model)/err, pderiv])
where FGRAD(x, p, i) is a user function which must compute the
derivative of the model with respect to parameter P[i] at X. When
finite differencing is used for computing derivatives (ie, when
AUTODERIVATIVE=1), or when MPFIT needs only the errors but not the
derivatives the parameter FJAC=None.
Derivatives should be returned in the PDERIV array. PDERIV should be an m x
n array, where m is the number of data points and n is the number
of parameters. dp[i,j] is the derivative at the ith point with
respect to the jth parameter.
The derivatives with respect to fixed parameters are ignored; zero
is an appropriate value to insert for those derivatives. Upon
input to the user function, FJAC is set to a vector with the same
length as P, with a value of 1 for a parameter which is free, and a
value of zero for a parameter which is fixed (and hence no
derivative needs to be calculated).
If the data is higher than one dimensional, then the *last*
dimension should be the parameter dimension. Example: fitting a
50x50 image, "dp" should be 50x50xNPAR.
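As an illustrative sketch (not part of the original text): for a
straight-line model F(x,p) = p[0] + p[1]*x, the derivative array
described above would be filled as
   pderiv = numpy.zeros([len(x), len(p)], dtype=float)
   pderiv[:,0] = 1.       # d model / d p[0]
   pderiv[:,1] = x        # d model / d p[1]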
CONSTRAINING PARAMETER VALUES WITH THE PARINFO KEYWORD
The behavior of MPFIT can be modified with respect to each
parameter to be fitted. A parameter value can be fixed; simple
boundary constraints can be imposed; limitations on the parameter
changes can be imposed; properties of the automatic derivative can
be modified; and parameters can be tied to one another.
These properties are governed by the PARINFO structure, which is
passed as a keyword parameter to MPFIT.
PARINFO should be a list of dictionaries, one list entry for each parameter.
Each parameter is associated with one element of the array, in
numerical order. The dictionary can have the following keys
(none are required, keys are case insensitive):
'value' - the starting parameter value (but see the START_PARAMS
parameter for more information).
'fixed' - a boolean value, whether the parameter is to be held
fixed or not. Fixed parameters are not varied by
MPFIT, but are passed on to MYFUNCT for evaluation.
'limited' - a two-element boolean array. If the first/second
element is set, then the parameter is bounded on the
lower/upper side. A parameter can be bounded on both
sides. Both LIMITED and LIMITS must be given
together.
'limits' - a two-element float array. Gives the
parameter limits on the lower and upper sides,
respectively. Zero, one or two of these values can be
set, depending on the values of LIMITED. Both LIMITED
and LIMITS must be given together.
'parname' - a string, giving the name of the parameter. The
fitting code of MPFIT does not use this tag in any
way. However, the default iterfunct will print the
parameter name if available.
'step' - the step size to be used in calculating the numerical
derivatives. If set to zero, then the step size is
computed automatically. Ignored when AUTODERIVATIVE=0.
'mpside' - the sidedness of the finite difference when computing
numerical derivatives. This field can take four
values:
0 - one-sided derivative computed automatically
1 - one-sided derivative (f(x+h) - f(x) )/h
-1 - one-sided derivative (f(x) - f(x-h))/h
2 - two-sided derivative (f(x+h) - f(x-h))/(2*h)
Where H is the STEP parameter described above. The
"automatic" one-sided derivative method will chose a
direction for the finite difference which does not
violate any constraints. The other methods do not
perform this check. The two-sided method is in
principle more precise, but requires twice as many
function evaluations. Default: 0.
'mpmaxstep' - the maximum change to be made in the parameter
value. During the fitting process, the parameter
will never be changed by more than this value in
one iteration.
A value of 0 indicates no maximum. Default: 0.
'tied' - a string expression which "ties" the parameter to other
free or fixed parameters. Any expression involving
constants and the parameter array P are permitted.
Example: if parameter 2 is always to be twice parameter
1 then use the following: parinfo[2]['tied'] = '2 * p[1]'.
Since they are totally constrained, tied parameters are
considered to be fixed; no errors are computed for them.
[ NOTE: the PARNAME can't be used in expressions. ]
'mpprint' - if set to 1, then the default iterfunct will print the
parameter value. If set to 0, the parameter value
will not be printed. This tag can be used to
selectively print only a few parameter values out of
many. Default: 1 (all parameters printed)
Future modifications to the PARINFO structure, if any, will involve
adding dictionary tags beginning with the two letters "MP".
Therefore programmers are urged to avoid using tags starting with
the same letters; otherwise they are free to include their own
fields within the PARINFO structure, and they will be ignored.
PARINFO Example:
parinfo = [{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}
for i in range(5)]
parinfo[0]['fixed'] = 1
parinfo[4]['limited'][0] = 1
parinfo[4]['limits'][0] = 50.
values = [5.7, 2.2, 500., 1.5, 2000.]
for i in range(5): parinfo[i]['value']=values[i]
A total of 5 parameters, with starting values of 5.7,
2.2, 500, 1.5, and 2000 are given. The first parameter
is fixed at a value of 5.7, and the last parameter is
constrained to be above 50.
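A short, illustrative sketch of the 'tied' mechanism described above
(in this Python port the tie expression is evaluated against the
parameter array p, so parameter 1 is forced to twice parameter 0):
   parinfo = [{'value': 1.0},
              {'value': 2.0, 'tied': '2. * p[0]'}]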
EXAMPLE
import mpfit
import numpy
x = numpy.arange(100, dtype=float)
p0 = [5.7, 2.2, 500., 1.5, 2000.]
y = ( p0[0] + p0[1]*x + p0[2]*(x**2) + p0[3]*numpy.sqrt(x) +
p0[4]*numpy.log(x))
fa = {'x':x, 'y':y, 'err':err}
m = mpfit.mpfit(myfunct, p0, functkw=fa)
print 'status = ', m.status
if (m.status <= 0): print 'error message = ', m.errmsg
print 'parameters = ', m.params
Minimizes sum of squares of MYFUNCT. MYFUNCT is called with the X,
Y, and ERR keyword parameters that are given by FUNCTKW. The
results can be obtained from the returned object m.
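As a fully self-contained sketch of the same workflow (the quadratic
model, data and names below are illustrative assumptions, not part of
the original documentation; the module is assumed importable as mpfit):
   import numpy
   from mpfit import mpfit
   def myfunct(p, fjac=None, x=None, y=None, err=None):
      # Quadratic model; return a status flag and the weighted deviations.
      model = p[0] + p[1]*x + p[2]*x**2
      status = 0
      return [status, (y - model)/err]
   x = numpy.linspace(-5., 5., 50)
   ptrue = [1., 2., 0.5]
   err = 0.1*numpy.ones(len(x))
   y = ptrue[0] + ptrue[1]*x + ptrue[2]*x**2 + err*numpy.random.randn(len(x))
   p0 = [0.5, 1., 1.]                       # starting guess
   m = mpfit(myfunct, p0, functkw={'x':x, 'y':y, 'err':err})
   print 'status =', m.status
   print 'params =', m.params, '+/-', m.perror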
THEORY OF OPERATION
There are many specific strategies for function minimization. One
very popular technique is to use function gradient information to
realize the local structure of the function. Near a local minimum
the function value can be Taylor expanded about x0 as follows:
f(x) = f(x0) + f'(x0) . (x-x0) + (1/2) (x-x0) . f''(x0) . (x-x0)     (1)
(the three terms are of order 0, 1 and 2, respectively)
Here f'(x) is the gradient vector of f at x, and f''(x) is the
Hessian matrix of second derivatives of f at x. The vector x is
the set of function parameters, not the measured data vector. One
can find the minimum of f, f(xm) using Newton's method, and
arrives at the following linear equation:
f''(x0) . (xm-x0) = - f'(x0) (2)
If an inverse can be found for f''(x0) then one can solve for
(xm-x0), the step vector from the current position x0 to the new
projected minimum. Here the problem has been linearized (ie, the
gradient information is known to first order). f''(x0) is
symmetric n x n matrix, and should be positive definite.
The Levenberg - Marquardt technique is a variation on this theme.
It adds an additional diagonal term to the equation which may aid the
convergence properties:
(f''(x0) + nu I) . (xm-x0) = -f'(x0) (2a)
where I is the identity matrix. When nu is large, the overall
matrix is diagonally dominant, and the iterations follow steepest
descent. When nu is small, the iterations are quadratically
convergent.
In principle, if f''(x0) and f'(x0) are known then xm-x0 can be
determined. However the Hessian matrix is often difficult or
impossible to compute. The gradient f'(x0) may be easier to
compute, if even by finite difference techniques. So-called
quasi-Newton techniques attempt to successively estimate f''(x0)
by building up gradient information as the iterations proceed.
In the least squares problem there are further simplifications
which assist in solving eqn (2). The function to be minimized is
a sum of squares:
f = Sum(hi^2) (3)
where hi is the ith residual out of m residuals as described
above. This can be substituted back into eqn (2) after computing
the derivatives:
f' = 2 Sum(hi hi')
f'' = 2 Sum(hi' hj') + 2 Sum(hi hi'') (4)
If one assumes that the parameters are already close enough to a
minimum, then one typically finds that the second term in f'' is
negligible [or, in any case, is too difficult to compute]. Thus,
equation (2) can be solved, at least approximately, using only
gradient information.
In matrix notation, the combination of eqns (2) and (4) becomes:
hT' . h' . dx = - hT' . h (5)
Where h is the residual vector (length m), hT is its transpose, h'
is the Jacobian matrix (dimensions n x m), and dx is (xm-x0). The
user function supplies the residual vector h, and in some cases h'
when it is not found by finite differences (see MPFIT_FDJAC2,
which finds h and hT'). Even if dx is not the best absolute step
to take, it does provide a good estimate of the best *direction*,
so often a line minimization will occur along the dx vector
direction.
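In numpy terms, one damped step of eqns (2a)/(5) can be sketched as
follows (assuming J holds the m x n Jacobian h' and h the residual
vector; the actual code works with the QR factors instead, see below):
   A  = numpy.dot(J.T, J) + nu*numpy.eye(J.shape[1])
   dx = numpy.linalg.solve(A, -numpy.dot(J.T, h))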
The method of solution employed by MINPACK is to form the Q . R
factorization of h', where Q is an orthogonal matrix such that QT .
Q = I, and R is upper right triangular. Using h' = Q . R and the
orthogonality of Q, eqn (5) becomes
(RT . QT) . (Q . R) . dx = - (RT . QT) . h
RT . R . dx = - RT . QT . h (6)
R . dx = - QT . h
where the last statement follows because R is upper triangular.
Here, R, QT and h are known so this is a matter of solving for dx.
The routine MPFIT_QRFAC provides the QR factorization of h, with
pivoting, and MPFIT_QRSOLV provides the solution for dx.
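Ignoring pivoting and the damping term, the QR route of eqn (6) can be
sketched as:
   q, r = numpy.linalg.qr(J)                         # h' = Q . R
   dx = numpy.linalg.solve(r, -numpy.dot(q.T, h))    # R . dx = - QT . h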
REFERENCES
MINPACK-1, Jorge More', available from netlib (www.netlib.org).
"Optimization Software Guide," Jorge More' and Stephen Wright,
SIAM, *Frontiers in Applied Mathematics*, Number 14.
More', Jorge J., "The Levenberg-Marquardt Algorithm:
Implementation and Theory," in *Numerical Analysis*, ed. Watson,
G. A., Lecture Notes in Mathematics 630, Springer-Verlag, 1977.
MODIFICATION HISTORY
Translated from MINPACK-1 in FORTRAN, Apr-Jul 1998, CM
Copyright (C) 1997-2002, Craig Markwardt
This software is provided as is without any warranty whatsoever.
Permission to use, copy, modify, and distribute modified or
unmodified copies is granted, provided this copyright and disclaimer
are included unchanged.
Translated from MPFIT (Craig Markwardt's IDL package) to Python,
August, 2002. Mark Rivers
Converted from Numeric to numpy (Sergey Koposov, July 2008)
"""
import numpy
import types
import scipy.lib.blas
# Original FORTRAN documentation
# **********
#
# subroutine lmdif
#
# the purpose of lmdif is to minimize the sum of the squares of
# m nonlinear functions in n variables by a modification of
# the levenberg-marquardt algorithm. the user must provide a
# subroutine which calculates the functions. the jacobian is
# then calculated by a forward-difference approximation.
#
# the subroutine statement is
#
# subroutine lmdif(fcn,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn,
# diag,mode,factor,nprint,info,nfev,fjac,
# ldfjac,ipvt,qtf,wa1,wa2,wa3,wa4)
#
# where
#
# fcn is the name of the user-supplied subroutine which
# calculates the functions. fcn must be declared
# in an external statement in the user calling
# program, and should be written as follows.
#
# subroutine fcn(m,n,x,fvec,iflag)
# integer m,n,iflag
# double precision x(n),fvec(m)
# ----------
# calculate the functions at x and
# return this vector in fvec.
# ----------
# return
# end
#
# the value of iflag should not be changed by fcn unless
# the user wants to terminate execution of lmdif.
# in this case set iflag to a negative integer.
#
# m is a positive integer input variable set to the number
# of functions.
#
# n is a positive integer input variable set to the number
# of variables. n must not exceed m.
#
# x is an array of length n. on input x must contain
# an initial estimate of the solution vector. on output x
# contains the final estimate of the solution vector.
#
# fvec is an output array of length m which contains
# the functions evaluated at the output x.
#
# ftol is a nonnegative input variable. termination
# occurs when both the actual and predicted relative
# reductions in the sum of squares are at most ftol.
# therefore, ftol measures the relative error desired
# in the sum of squares.
#
# xtol is a nonnegative input variable. termination
# occurs when the relative error between two consecutive
# iterates is at most xtol. therefore, xtol measures the
# relative error desired in the approximate solution.
#
# gtol is a nonnegative input variable. termination
# occurs when the cosine of the angle between fvec and
# any column of the jacobian is at most gtol in absolute
# value. therefore, gtol measures the orthogonality
# desired between the function vector and the columns
# of the jacobian.
#
# maxfev is a positive integer input variable. termination
# occurs when the number of calls to fcn is at least
# maxfev by the end of an iteration.
#
# epsfcn is an input variable used in determining a suitable
# step length for the forward-difference approximation. this
# approximation assumes that the relative errors in the
# functions are of the order of epsfcn. if epsfcn is less
# than the machine precision, it is assumed that the relative
# errors in the functions are of the order of the machine
# precision.
#
# diag is an array of length n. if mode = 1 (see
# below), diag is internally set. if mode = 2, diag
# must contain positive entries that serve as
# multiplicative scale factors for the variables.
#
# mode is an integer input variable. if mode = 1, the
# variables will be scaled internally. if mode = 2,
# the scaling is specified by the input diag. other
# values of mode are equivalent to mode = 1.
#
# factor is a positive input variable used in determining the
# initial step bound. this bound is set to the product of
# factor and the euclidean norm of diag*x if nonzero, or else
# to factor itself. in most cases factor should lie in the
# interval (.1,100.). 100. is a generally recommended value.
#
# nprint is an integer input variable that enables controlled
# printing of iterates if it is positive. in this case,
# fcn is called with iflag = 0 at the beginning of the first
# iteration and every nprint iterations thereafter and
# immediately prior to return, with x and fvec available
# for printing. if nprint is not positive, no special calls
# of fcn with iflag = 0 are made.
#
# info is an integer output variable. if the user has
# terminated execution, info is set to the (negative)
# value of iflag. see description of fcn. otherwise,
# info is set as follows.
#
# info = 0 improper input parameters.
#
# info = 1 both actual and predicted relative reductions
# in the sum of squares are at most ftol.
#
# info = 2 relative error between two consecutive iterates
# is at most xtol.
#
# info = 3 conditions for info = 1 and info = 2 both hold.
#
# info = 4 the cosine of the angle between fvec and any
# column of the jacobian is at most gtol in
# absolute value.
#
# info = 5 number of calls to fcn has reached or
# exceeded maxfev.
#
# info = 6 ftol is too small. no further reduction in
# the sum of squares is possible.
#
# info = 7 xtol is too small. no further improvement in
# the approximate solution x is possible.
#
# info = 8 gtol is too small. fvec is orthogonal to the
# columns of the jacobian to machine precision.
#
# nfev is an integer output variable set to the number of
# calls to fcn.
#
# fjac is an output m by n array. the upper n by n submatrix
# of fjac contains an upper triangular matrix r with
# diagonal elements of nonincreasing magnitude such that
#
# t t t
# p *(jac *jac)*p = r *r,
#
# where p is a permutation matrix and jac is the final
# calculated jacobian. column j of p is column ipvt(j)
# (see below) of the identity matrix. the lower trapezoidal
# part of fjac contains information generated during
# the computation of r.
#
# ldfjac is a positive integer input variable not less than m
# which specifies the leading dimension of the array fjac.
#
# ipvt is an integer output array of length n. ipvt
# defines a permutation matrix p such that jac*p = q*r,
# where jac is the final calculated jacobian, q is
# orthogonal (not stored), and r is upper triangular
# with diagonal elements of nonincreasing magnitude.
# column j of p is column ipvt(j) of the identity matrix.
#
# qtf is an output array of length n which contains
# the first n elements of the vector (q transpose)*fvec.
#
# wa1, wa2, and wa3 are work arrays of length n.
#
# wa4 is a work array of length m.
#
# subprograms called
#
# user-supplied ...... fcn
#
# minpack-supplied ... dpmpar,enorm,fdjac2,lmpar,qrfac
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt,mod
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
class mpfit:
blas_enorm32, = scipy.lib.blas.get_blas_funcs(['nrm2'],numpy.array([0],dtype=numpy.float32))
blas_enorm64, = scipy.lib.blas.get_blas_funcs(['nrm2'],numpy.array([0],dtype=numpy.float64))
def __init__(self, fcn, xall=None, functkw={}, parinfo=None,
ftol=1.e-10, xtol=1.e-10, gtol=1.e-10,
damp=0., maxiter=200, factor=100., nprint=1,
iterfunct='default', iterkw={}, nocovar=0,
rescale=0, autoderivative=1, quiet=0,
diag=None, epsfcn=None, debug=0):
"""
Inputs:
fcn:
The function to be minimized. The function should return the weighted
deviations between the model and the data, as described above.
xall:
An array of starting values for each of the parameters of the model.
The number of parameters should be fewer than the number of measurements.
This parameter is optional if the parinfo keyword is used (but see
parinfo). The parinfo keyword provides a mechanism to fix or constrain
individual parameters.
Keywords:
autoderivative:
If this is set, derivatives of the function will be computed
automatically via a finite differencing procedure. If not set, then
fcn must provide the (analytical) derivatives.
Default: set (=1)
NOTE: to supply your own analytical derivatives,
explicitly pass autoderivative=0
ftol:
A nonnegative input variable. Termination occurs when both the actual
and predicted relative reductions in the sum of squares are at most
ftol (and status is accordingly set to 1 or 3). Therefore, ftol
measures the relative error desired in the sum of squares.
Default: 1E-10
functkw:
A dictionary which contains the parameters to be passed to the
user-supplied function specified by fcn via the standard Python
keyword dictionary mechanism. This is the way you can pass additional
data to your user-supplied function without using global variables.
Consider the following example:
if functkw = {'xval':[1.,2.,3.], 'yval':[1.,4.,9.],
'errval':[1.,1.,1.] }
then the user supplied function should be declared like this:
def myfunct(p, fjac=None, xval=None, yval=None, errval=None):
Default: {} No extra parameters are passed to the user-supplied
function.
gtol:
A nonnegative input variable. Termination occurs when the cosine of
the angle between fvec and any column of the jacobian is at most gtol
in absolute value (and status is accordingly set to 4). Therefore,
gtol measures the orthogonality desired between the function vector
and the columns of the jacobian.
Default: 1e-10
iterkw:
The keyword arguments to be passed to iterfunct via the dictionary
keyword mechanism. This should be a dictionary and is similar in
operation to FUNCTKW.
Default: {} No arguments are passed.
iterfunct:
The name of a function to be called upon each NPRINT iteration of the
MPFIT routine. It should be declared in the following way:
def iterfunct(myfunct, p, iter, fnorm, functkw=None,
parinfo=None, quiet=0, dof=None, [iterkw keywords here])
# perform custom iteration update
iterfunct must accept all three keyword parameters (FUNCTKW, PARINFO
and QUIET).
myfunct: The user-supplied function to be minimized,
p: The current set of model parameters
iter: The iteration number
functkw: The arguments to be passed to myfunct.
fnorm: The chi-squared value.
quiet: Set when no textual output should be printed.
dof: The number of degrees of freedom, normally the number of points
less the number of free parameters.
See below for documentation of parinfo.
In implementation, iterfunct can perform updates to the terminal or
graphical user interface, to provide feedback while the fit proceeds.
If the fit is to be stopped for any reason, then iterfunct should return
a status value between -15 and -1. Otherwise it should return None
(e.g. no return statement) or 0.
In principle, iterfunct should probably not modify the parameter values,
because it may interfere with the algorithm's stability. In practice it
is allowed.
Default: an internal routine is used to print the parameter values.
Set iterfunct=None if there is no user-defined routine and you don't
want the internal default routine be called.
maxiter:
The maximum number of iterations to perform. If the number is exceeded,
then the status value is set to 5 and MPFIT returns.
Default: 200 iterations
nocovar:
Set this keyword to prevent the calculation of the covariance matrix
before returning (see COVAR)
Default: clear (=0) The covariance matrix is returned
nprint:
The frequency with which iterfunct is called. A value of 1 indicates
that iterfunct is called with every iteration, while 2 indicates every
other iteration, etc. Note that several Levenberg-Marquardt attempts
can be made in a single iteration.
Default value: 1
parinfo
Provides a mechanism for more sophisticated constraints to be placed on
parameter values. When parinfo is not passed, then it is assumed that
all parameters are free and unconstrained. Values in parinfo are never
modified during a call to MPFIT.
See description above for the structure of PARINFO.
Default value: None All parameters are free and unconstrained.
quiet:
Set this keyword when no textual output should be printed by MPFIT
damp:
A scalar number, indicating the cut-off value of residuals where
"damping" will occur. Residuals with magnitudes greater than this
number will be replaced by their hyperbolic tangent. This partially
mitigates the so-called large residual problem inherent in
least-squares solvers (as for the test problem CURVI,
http://www.maxthis.com/curviex.htm).
A value of 0 indicates no damping.
Default: 0
Note: DAMP doesn't work with autoderivative=0
xtol:
A nonnegative input variable. Termination occurs when the relative error
between two consecutive iterates is at most xtol (and status is
accordingly set to 2 or 3). Therefore, xtol measures the relative error
desired in the approximate solution.
Default: 1E-10
Outputs:
Returns an object of type mpfit. The results are attributes of this class,
e.g. mpfit.status, mpfit.errmsg, mpfit.params, mpfit.niter, mpfit.covar.
.status
An integer status code is returned. All values greater than zero can
represent success (however .status == 5 may indicate failure to
converge). It can have one of the following values:
-16
A parameter or function value has become infinite or an undefined
number. This is usually a consequence of numerical overflow in the
user's model function, which must be avoided.
-15 to -1
These are error codes that either MYFUNCT or iterfunct may return to
terminate the fitting process. Values from -15 to -1 are reserved
for the user functions and will not clash with MPFIT.
0 Improper input parameters.
1 Both actual and predicted relative reductions in the sum of squares
are at most ftol.
2 Relative error between two consecutive iterates is at most xtol
3 Conditions for status = 1 and status = 2 both hold.
4 The cosine of the angle between fvec and any column of the jacobian
is at most gtol in absolute value.
5 The maximum number of iterations has been reached.
6 ftol is too small. No further reduction in the sum of squares is
possible.
7 xtol is too small. No further improvement in the approximate solution
x is possible.
8 gtol is too small. fvec is orthogonal to the columns of the jacobian
to machine precision.
.fnorm
The value of the summed squared residuals for the returned parameter
values.
.covar
The covariance matrix for the set of parameters returned by MPFIT.
The matrix is NxN where N is the number of parameters. The square root
of the diagonal elements gives the formal 1-sigma statistical errors on
the parameters if errors were treated "properly" in fcn.
Parameter errors are also returned in .perror.
To compute the correlation matrix, pcor, use this example:
cov = mpfit.covar
pcor = cov * 0.
for i in range(n):
for j in range(n):
pcor[i,j] = cov[i,j]/sqrt(cov[i,i]*cov[j,j])
If nocovar is set or MPFIT terminated abnormally, then .covar is set to
a scalar with value None.
.errmsg
A string error or warning message is returned.
.nfev
The number of calls to MYFUNCT performed.
.niter
The number of iterations completed.
.perror
The formal 1-sigma errors in each parameter, computed from the
covariance matrix. If a parameter is held fixed, or if it touches a
boundary, then the error is reported as zero.
If the fit is unweighted (i.e. no errors were given, or the weights
were uniformly set to unity), then .perror will probably not represent
the true parameter uncertainties.
*If* you can assume that the true reduced chi-squared value is unity --
meaning that the fit is implicitly assumed to be of good quality --
then the estimated parameter uncertainties can be computed by scaling
.perror by the square root of the measured reduced chi-squared value:
dof = len(x) - len(mpfit.params) # deg of freedom
# scaled uncertainties
pcerror = mpfit.perror * sqrt(mpfit.fnorm / dof)
"""
self.niter = 0
self.params = None
self.covar = None
self.perror = None
self.status = 0 # Invalid input flag set while we check inputs
self.debug = debug
self.errmsg = ''
self.nfev = 0
self.damp = damp
self.dof=0
if fcn==None:
self.errmsg = "Usage: parms = mpfit('myfunt', ... )"
return
else:
self.fcn = fcn
if iterfunct == 'default':
iterfunct = self.defiter
# Parameter damping doesn't work when user is providing their own
# gradients.
if (self.damp != 0) and (autoderivative == 0):
self.errmsg = 'ERROR: keywords DAMP and AUTODERIVATIVE are mutually exclusive'
return
# Parameters can either be stored in parinfo, or x. x takes precedence if it exists
if (xall is None) and (parinfo is None):
self.errmsg = 'ERROR: must pass parameters in P or PARINFO'
return
# Be sure that PARINFO is of the right type
if parinfo is not None:
if type(parinfo) != types.ListType:
self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
return
else:
if type(parinfo[0]) != types.DictionaryType:
self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
return
if ((xall is not None) and (len(xall) != len(parinfo))):
self.errmsg = 'ERROR: number of elements in PARINFO and P must agree'
return
self.parinfo_in = parinfo
# If the parameters were not specified at the command line, then
# extract them from PARINFO
if xall is None:
xall = self.parinfo(parinfo, 'value')
if xall is None:
self.errmsg = 'ERROR: either P or PARINFO(*)["value"] must be supplied.'
return
self.parnames = self.parinfo(parinfo, 'parname')
# Make sure parameters are numpy arrays
xall = numpy.asarray(xall)
# If xall is not a float array, or is a float array narrower than
# 64 bits, convert it to double precision
if xall.dtype.kind != 'f' or xall.dtype.itemsize<=4:
xall = xall.astype(numpy.float)
npar = len(xall)
self.fnorm = -1.
fnorm1 = -1.
# TIED parameters?
ptied = self.parinfo(parinfo, 'tied', default='', n=npar)
self.qanytied = 0
for i in range(npar):
ptied[i] = ptied[i].strip()
if ptied[i] != '':
self.qanytied = 1
self.ptied = ptied
# FIXED parameters ?
pfixed = self.parinfo(parinfo, 'fixed', default=0, n=npar)
pfixed = (pfixed == 1)
for i in range(npar):
pfixed[i] = pfixed[i] or (ptied[i] != '') # Tied parameters are also effectively fixed
# Finite differencing step, absolute and relative, and sidedness of deriv.
step = self.parinfo(parinfo, 'step', default=0., n=npar)
dstep = self.parinfo(parinfo, 'relstep', default=0., n=npar)
dside = self.parinfo(parinfo, 'mpside', default=0, n=npar)
# Maximum and minimum steps allowed to be taken in one iteration
maxstep = self.parinfo(parinfo, 'mpmaxstep', default=0., n=npar)
minstep = self.parinfo(parinfo, 'mpminstep', default=0., n=npar)
qmin = minstep != 0
qmin[:] = False # Remove minstep for now!!
qmax = maxstep != 0
if numpy.any(qmin & qmax & (maxstep<minstep)):
self.errmsg = 'ERROR: MPMINSTEP is greater than MPMAXSTEP'
return
wh = (numpy.nonzero((qmin!=0.) | (qmax!=0.)))[0]
qminmax = len(wh) > 0
# Finish up the free parameters
ifree = (numpy.nonzero(pfixed != 1))[0]
nfree = len(ifree)
if nfree == 0:
self.errmsg = 'ERROR: no free parameters'
return
# Compose only VARYING parameters
self.params = xall.copy() # self.params is the set of parameters to be returned
x = self.params[ifree] # x is the set of free parameters
# LIMITED parameters ?
limited = self.parinfo(parinfo, 'limited', default=[0,0], n=npar)
limits = self.parinfo(parinfo, 'limits', default=[0.,0.], n=npar)
if (limited is not None) and (limits is not None):
# Error checking on limits in parinfo
if numpy.any((limited[:,0] & (xall < limits[:,0])) |
(limited[:,1] & (xall > limits[:,1]))):
self.errmsg = 'ERROR: parameters are not within PARINFO limits'
return
if numpy.any((limited[:,0] & limited[:,1]) &
(limits[:,0] >= limits[:,1]) &
(pfixed == 0)):
self.errmsg = 'ERROR: PARINFO parameter limits are not consistent'
return
# Transfer structure values to local variables
qulim = (limited[:,1])[ifree]
ulim = (limits [:,1])[ifree]
qllim = (limited[:,0])[ifree]
llim = (limits [:,0])[ifree]
if numpy.any((qulim!=0.) | (qllim!=0.)):
qanylim = 1
else:
qanylim = 0
else:
# Fill in local variables with dummy values
qulim = numpy.zeros(nfree)
ulim = x * 0.
qllim = qulim
llim = x * 0.
qanylim = 0
n = len(x)
# Check input parameters for errors
if (n < 0) or (ftol <= 0) or (xtol <= 0) or (gtol <= 0) \
or (maxiter < 0) or (factor <= 0):
self.errmsg = 'ERROR: input keywords are inconsistent'
return
if rescale != 0:
self.errmsg = 'ERROR: DIAG parameter scales are inconsistent'
if len(diag) < n:
return
if numpy.any(diag <= 0):
return
self.errmsg = ''
[self.status, fvec] = self.call(fcn, self.params, functkw)
if self.status < 0:
self.errmsg = 'ERROR: first call to "'+str(fcn)+'" failed'
return
# If the returned fvec elements are wider than four bytes, assume that we
# have double precision
# It is important that the machar is determined by the precision of
# the returned value, not by the precision of the input array
if numpy.array([fvec]).dtype.itemsize>4:
self.machar = machar(double=1)
self.blas_enorm = mpfit.blas_enorm64
else:
self.machar = machar(double=0)
self.blas_enorm = mpfit.blas_enorm32
machep = self.machar.machep
m = len(fvec)
if m < n:
self.errmsg = 'ERROR: number of parameters must not exceed data'
return
self.dof = m-nfree
self.fnorm = self.enorm(fvec)
# Initialize Levenberg-Marquardt parameter and iteration counter
par = 0.
self.niter = 1
qtf = x * 0.
self.status = 0
# Beginning of the outer loop
while(1):
# If requested, call fcn to enable printing of iterates
self.params[ifree] = x
if self.qanytied:
self.params = self.tie(self.params, ptied)
if (nprint > 0) and (iterfunct is not None):
if ((self.niter-1) % nprint) == 0:
mperr = 0
xnew0 = self.params.copy()
dof = numpy.max([len(fvec) - len(x), 0])
status = iterfunct(fcn, self.params, self.niter, self.fnorm**2,
functkw=functkw, parinfo=parinfo, quiet=quiet,
dof=dof, **iterkw)
if status is not None:
self.status = status
# Check for user termination
if self.status < 0:
self.errmsg = 'WARNING: premature termination by ' + str(iterfunct)
return
# If parameters were changed (grrr..) then re-tie
if numpy.max(numpy.abs(xnew0-self.params)) > 0:
if self.qanytied:
self.params = self.tie(self.params, ptied)
x = self.params[ifree]
# Calculate the jacobian matrix
self.status = 2
catch_msg = 'calling MPFIT_FDJAC2'
fjac = self.fdjac2(fcn, x, fvec, step, qulim, ulim, dside,
epsfcn=epsfcn,
autoderivative=autoderivative, dstep=dstep,
functkw=functkw, ifree=ifree, xall=self.params)
if fjac is None:
self.errmsg = 'WARNING: premature termination by FDJAC2'
return
# Determine if any of the parameters are pegged at the limits
if qanylim:
catch_msg = 'zeroing derivatives of pegged parameters'
whlpeg = (numpy.nonzero(qllim & (x == llim)))[0]
nlpeg = len(whlpeg)
whupeg = (numpy.nonzero(qulim & (x == ulim)))[0]
nupeg = len(whupeg)
# See if any "pegged" values should keep their derivatives
if nlpeg > 0:
# Total derivative of sum wrt lower pegged parameters
for i in range(nlpeg):
sum0 = sum(fvec * fjac[:,whlpeg[i]])
if sum0 > 0:
fjac[:,whlpeg[i]] = 0
if nupeg > 0:
# Total derivative of sum wrt upper pegged parameters
for i in range(nupeg):
sum0 = sum(fvec * fjac[:,whupeg[i]])
if sum0 < 0:
fjac[:,whupeg[i]] = 0
# Compute the QR factorization of the jacobian
[fjac, ipvt, wa1, wa2] = self.qrfac(fjac, pivot=1)
# On the first iteration if "diag" is unspecified, scale
# according to the norms of the columns of the initial jacobian
catch_msg = 'rescaling diagonal elements'
if self.niter == 1:
if (rescale==0) or (len(diag) < n):
diag = wa2.copy()
diag[diag == 0] = 1.
# On the first iteration, calculate the norm of the scaled x
# and initialize the step bound delta
wa3 = diag * x
xnorm = self.enorm(wa3)
delta = factor*xnorm
if delta == 0.:
delta = factor
# Form (q transpose)*fvec and store the first n components in qtf
catch_msg = 'forming (q transpose)*fvec'
wa4 = fvec.copy()
for j in range(n):
lj = ipvt[j]
temp3 = fjac[j,lj]
if temp3 != 0:
fj = fjac[j:,lj]
wj = wa4[j:]
# *** optimization wa4(j:*)
wa4[j:] = wj - fj * sum(fj*wj) / temp3
fjac[j,lj] = wa1[j]
qtf[j] = wa4[j]
# From this point on, only the square matrix, consisting of the
# triangle of R, is needed.
fjac = fjac[0:n, 0:n]
fjac.shape = [n, n]
temp = fjac.copy()
for i in range(n):
temp[:,i] = fjac[:, ipvt[i]]
fjac = temp.copy()
# Check for overflow. This should be a cheap test here since FJAC
# has been reduced to a (small) square matrix, and the test is
# O(N^2).
#wh = where(finite(fjac) EQ 0, ct)
#if ct GT 0 then goto, FAIL_OVERFLOW
# Compute the norm of the scaled gradient
catch_msg = 'computing the scaled gradient'
gnorm = 0.
if self.fnorm != 0:
for j in range(n):
l = ipvt[j]
if wa2[l] != 0:
sum0 = sum(fjac[0:j+1,j]*qtf[0:j+1])/self.fnorm
gnorm = numpy.max([gnorm,numpy.abs(sum0/wa2[l])])
# Test for convergence of the gradient norm
if gnorm <= gtol:
self.status = 4
break
if maxiter == 0:
self.status = 5
break
# Rescale if necessary
if rescale == 0:
diag = numpy.choose(diag>wa2, (wa2, diag))
# Beginning of the inner loop
while(1):
# Determine the levenberg-marquardt parameter
catch_msg = 'calculating LM parameter (MPFIT_)'
[fjac, par, wa1, wa2] = self.lmpar(fjac, ipvt, diag, qtf,
delta, wa1, wa2, par=par)
# Store the direction p and x+p. Calculate the norm of p
wa1 = -wa1
if (qanylim == 0) and (qminmax == 0):
# No parameter limits, so just move to new position WA2
alpha = 1.
wa2 = x + wa1
else:
# Respect the limits. If a step were to go out of bounds, then
# we should take a step in the same direction but shorter distance.
# The step should take us right to the limit in that case.
alpha = 1.
if qanylim:
# Do not allow any steps out of bounds
catch_msg = 'checking for a step out of bounds'
if nlpeg > 0:
wa1[whlpeg] = numpy.clip( wa1[whlpeg], 0., numpy.max(wa1))
if nupeg > 0:
wa1[whupeg] = numpy.clip(wa1[whupeg], numpy.min(wa1), 0.)
dwa1 = numpy.abs(wa1) > machep
whl = (numpy.nonzero(((dwa1!=0.) & qllim) & ((x + wa1) < llim)))[0]
if len(whl) > 0:
t = ((llim[whl] - x[whl]) /
wa1[whl])
alpha = numpy.min([alpha, numpy.min(t)])
whu = (numpy.nonzero(((dwa1!=0.) & qulim) & ((x + wa1) > ulim)))[0]
if len(whu) > 0:
t = ((ulim[whu] - x[whu]) /
wa1[whu])
alpha = numpy.min([alpha, numpy.min(t)])
# Obey any max step values.
if qminmax:
nwa1 = wa1 * alpha
whmax = (numpy.nonzero((qmax != 0.) & (maxstep > 0)))[0]
if len(whmax) > 0:
mrat = numpy.max(numpy.abs(nwa1[whmax]) /
numpy.abs(maxstep[ifree[whmax]]))
if mrat > 1:
alpha = alpha / mrat
# Scale the resulting vector
wa1 = wa1 * alpha
wa2 = x + wa1
# Adjust the final output values. If the step put us exactly
# on a boundary, make sure it is exact.
sgnu = (ulim >= 0) * 2. - 1.
sgnl = (llim >= 0) * 2. - 1.
# Handles case of
# ... nonzero *LIM ... ...zero * LIM
ulim1 = ulim * (1 - sgnu * machep) - (ulim == 0) * machep
llim1 = llim * (1 + sgnl * machep) + (llim == 0) * machep
wh = (numpy.nonzero((qulim!=0) & (wa2 >= ulim1)))[0]
if len(wh) > 0:
wa2[wh] = ulim[wh]
wh = (numpy.nonzero((qllim!=0.) & (wa2 <= llim1)))[0]
if len(wh) > 0:
wa2[wh] = llim[wh]
# endelse
wa3 = diag * wa1
pnorm = self.enorm(wa3)
# On the first iteration, adjust the initial step bound
if self.niter == 1:
delta = numpy.min([delta,pnorm])
self.params[ifree] = wa2
# Evaluate the function at x+p and calculate its norm
mperr = 0
catch_msg = 'calling '+str(fcn)
[self.status, wa4] = self.call(fcn, self.params, functkw)
if self.status < 0:
self.errmsg = 'WARNING: premature termination by "'+fcn+'"'
return
fnorm1 = self.enorm(wa4)
# Compute the scaled actual reduction
catch_msg = 'computing convergence criteria'
actred = -1.
if (0.1 * fnorm1) < self.fnorm:
actred = - (fnorm1/self.fnorm)**2 + 1.
# Compute the scaled predicted reduction and the scaled directional
# derivative
for j in range(n):
wa3[j] = 0
wa3[0:j+1] = wa3[0:j+1] + fjac[0:j+1,j]*wa1[ipvt[j]]
# Remember, alpha is the fraction of the full LM step actually
# taken
temp1 = self.enorm(alpha*wa3)/self.fnorm
temp2 = (numpy.sqrt(alpha*par)*pnorm)/self.fnorm
prered = temp1*temp1 + (temp2*temp2)/0.5
dirder = -(temp1*temp1 + temp2*temp2)
# Compute the ratio of the actual to the predicted reduction.
ratio = 0.
if prered != 0:
ratio = actred/prered
# Update the step bound
if ratio <= 0.25:
if actred >= 0:
temp = .5
else:
temp = .5*dirder/(dirder + .5*actred)
if ((0.1*fnorm1) >= self.fnorm) or (temp < 0.1):
temp = 0.1
delta = temp*numpy.min([delta,pnorm/0.1])
par = par/temp
else:
if (par == 0) or (ratio >= 0.75):
delta = pnorm/.5
par = .5*par
# Test for successful iteration
if ratio >= 0.0001:
# Successful iteration. Update x, fvec, and their norms
x = wa2
wa2 = diag * x
fvec = wa4
xnorm = self.enorm(wa2)
self.fnorm = fnorm1
self.niter = self.niter + 1
# Tests for convergence
if (numpy.abs(actred) <= ftol) and (prered <= ftol) \
and (0.5 * ratio <= 1):
self.status = 1
if delta <= xtol*xnorm:
self.status = 2
if (numpy.abs(actred) <= ftol) and (prered <= ftol) \
and (0.5 * ratio <= 1) and (self.status == 2):
self.status = 3
if self.status != 0:
break
# Tests for termination and stringent tolerances
if self.niter >= maxiter:
self.status = 5
if (numpy.abs(actred) <= machep) and (prered <= machep) \
and (0.5*ratio <= 1):
self.status = 6
if delta <= machep*xnorm:
self.status = 7
if gnorm <= machep:
self.status = 8
if self.status != 0:
break
# End of inner loop. Repeat if iteration unsuccessful
if ratio >= 0.0001:
break
# Check for over/underflow
if ~numpy.all(numpy.isfinite(wa1) & numpy.isfinite(wa2) & \
numpy.isfinite(x)) or ~numpy.isfinite(ratio):
self.errmsg = ('ERROR: parameter or function value(s) have become '
'infinite; check model function for over- and underflow')
self.status = -16
break
#wh = where(finite(wa1) EQ 0 OR finite(wa2) EQ 0 OR finite(x) EQ 0, ct)
#if ct GT 0 OR finite(ratio) EQ 0 then begin
if self.status != 0:
break;
# End of outer loop.
catch_msg = 'in the termination phase'
# Termination, either normal or user imposed.
if len(self.params) == 0:
return
if nfree == 0:
self.params = xall.copy()
else:
self.params[ifree] = x
if (nprint > 0) and (self.status > 0):
catch_msg = 'calling ' + str(fcn)
[status, fvec] = self.call(fcn, self.params, functkw)
catch_msg = 'in the termination phase'
self.fnorm = self.enorm(fvec)
if (self.fnorm is not None) and (fnorm1 is not None):
self.fnorm = numpy.max([self.fnorm, fnorm1])
self.fnorm = self.fnorm**2.
self.covar = None
self.perror = None
# (very carefully) set the covariance matrix COVAR
if (self.status > 0) and (nocovar==0) and (n is not None) \
and (fjac is not None) and (ipvt is not None):
sz = fjac.shape
if (n > 0) and (sz[0] >= n) and (sz[1] >= n) \
and (len(ipvt) >= n):
catch_msg = 'computing the covariance matrix'
cv = self.calc_covar(fjac[0:n,0:n], ipvt[0:n])
cv.shape = [n, n]
nn = len(xall)
# Fill in actual covariance matrix, accounting for fixed
# parameters.
self.covar = numpy.zeros([nn, nn], dtype=float)
for i in range(n):
self.covar[ifree,ifree[i]] = cv[:,i]
# Compute errors in parameters
catch_msg = 'computing parameter errors'
self.perror = numpy.zeros(nn, dtype=float)
d = numpy.diagonal(self.covar)
wh = (numpy.nonzero(d >= 0))[0]
if len(wh) > 0:
self.perror[wh] = numpy.sqrt(d[wh])
return
def __str__(self):
return {'params': self.params,
'niter': self.niter,
'covar': self.covar,
'perror': self.perror,
'status': self.status,
'debug': self.debug,
'errmsg': self.errmsg,
'nfev': self.nfev,
'damp': self.damp
#,'machar':self.machar
}.__str__()
# Default procedure to be called every iteration. It simply prints
# the parameter values.
def defiter(self, fcn, x, iter, fnorm=None, functkw=None,
quiet=0, iterstop=None, parinfo=None,
format=None, pformat='%.10g', dof=1):
if self.debug:
print 'Entering defiter...'
if quiet:
return
if fnorm is None:
[status, fvec] = self.call(fcn, x, functkw)
fnorm = self.enorm(fvec)**2
# Determine which parameters to print
nprint = len(x)
print "Iter ", ('%6i' % iter)," CHI-SQUARE = ",('%.10g' % fnorm)," DOF = ", ('%i' % dof)
for i in range(nprint):
if (parinfo is not None) and (parinfo[i].has_key('parname')):
p = ' ' + parinfo[i]['parname'] + ' = '
else:
p = ' P' + str(i) + ' = '
if (parinfo is not None) and (parinfo[i].has_key('mpprint')):
iprint = parinfo[i]['mpprint']
else:
iprint = 1
if iprint:
print p + (pformat % x[i]) + ' '
return 0
def print_results(self, **kwargs):
self.defiter(self.fcn, self.params, self.niter, parinfo=self.parinfo_in,
dof=self.dof, fnorm=self.fnorm, **kwargs)
# DO_ITERSTOP:
# if keyword_set(iterstop) then begin
# k = get_kbrd(0)
# if k EQ string(byte(7)) then begin
# message, 'WARNING: minimization not complete', /info
# print, 'Do you want to terminate this procedure? (y/n)', $
# format='(A,$)'
# k = ''
# read, k
# if strupcase(strmid(k,0,1)) EQ 'Y' then begin
# message, 'WARNING: Procedure is terminating.', /info
# mperr = -1
# endif
# endif
# endif
# Procedure to parse the parameter values in PARINFO, which is a list of dictionaries
def parinfo(self, parinfo=None, key='a', default=None, n=0):
if self.debug:
print 'Entering parinfo...'
if (n == 0) and (parinfo is not None):
n = len(parinfo)
if n == 0:
values = default
return values
values = []
for i in range(n):
if (parinfo is not None) and (parinfo[i].has_key(key)):
values.append(parinfo[i][key])
else:
values.append(default)
# Convert to numeric arrays if possible
test = default
if type(default) == types.ListType:
test=default[0]
if isinstance(test, types.IntType):
values = numpy.asarray(values, int)
elif isinstance(test, types.FloatType):
values = numpy.asarray(values, float)
return values
# Call user function or procedure, with _EXTRA or not, with
# derivatives or not.
def call(self, fcn, x, functkw, fjac=None):
if self.debug:
print 'Entering call...'
if self.qanytied:
x = self.tie(x, self.ptied)
self.nfev = self.nfev + 1
if fjac is None:
[status, f] = fcn(x, fjac=fjac, **functkw)
if self.damp > 0:
# Apply the damping if requested. This replaces the residuals
# with their hyperbolic tangent. Thus residuals larger than
# DAMP are essentially clipped.
f = numpy.tanh(f/self.damp)
return [status, f]
else:
return fcn(x, fjac=fjac, **functkw)
def enorm(self, vec):
ans = self.blas_enorm(vec)
return ans
def fdjac2(self, fcn, x, fvec, step=None, ulimited=None, ulimit=None, dside=None,
epsfcn=None, autoderivative=1,
functkw=None, xall=None, ifree=None, dstep=None):
if self.debug:
print 'Entering fdjac2...'
machep = self.machar.machep
if epsfcn is None:
epsfcn = machep
if xall is None:
xall = x
if ifree is None:
ifree = numpy.arange(len(xall))
if step is None:
step = x * 0.
nall = len(xall)
eps = numpy.sqrt(numpy.max([epsfcn, machep]))
m = len(fvec)
n = len(x)
# Compute analytical derivative if requested
if autoderivative == 0:
mperr = 0
fjac = numpy.zeros(nall, dtype=float)
fjac[ifree] = 1.0 # Specify which parameters need derivatives
[status, fp] = self.call(fcn, xall, functkw, fjac=fjac)
if len(fjac) != m*nall:
print 'ERROR: Derivative matrix was not computed properly.'
return None
# This definition is consistent with CURVEFIT
# Sign error found (thanks Jesus Fernandez <fernande@irm.chu-caen.fr>)
fjac.shape = [m,nall]
fjac = -fjac
# Select only the free parameters
if len(ifree) < nall:
fjac = fjac[:,ifree]
fjac.shape = [m, n]
return fjac
fjac = numpy.zeros([m, n], dtype=float)
h = eps * numpy.abs(x)
# if STEP is given, use that
# STEP includes the fixed parameters
if step is not None:
stepi = step[ifree]
wh = (numpy.nonzero(stepi > 0))[0]
if len(wh) > 0:
h[wh] = stepi[wh]
# if relative step is given, use that
# DSTEP includes the fixed parameters
if len(dstep) > 0:
dstepi = dstep[ifree]
wh = (numpy.nonzero(dstepi > 0))[0]
if len(wh) > 0:
h[wh] = numpy.abs(dstepi[wh]*x[wh])
# In case any of the step values are zero
h[h == 0] = eps
# Reverse the sign of the step if we are up against the parameter
# limit, or if the user requested it.
# DSIDE includes the fixed parameters (ULIMITED/ULIMIT have only
# varying ones)
mask = dside[ifree] == -1
if len(ulimited) > 0 and len(ulimit) > 0:
mask = (mask | ((ulimited!=0) & (x > ulimit-h)))
wh = (numpy.nonzero(mask))[0]
if len(wh) > 0:
h[wh] = - h[wh]
# Loop through parameters, computing the derivative for each
for j in range(n):
xp = xall.copy()
xp[ifree[j]] = xp[ifree[j]] + h[j]
[status, fp] = self.call(fcn, xp, functkw)
if status < 0:
return None
if numpy.abs(dside[ifree[j]]) <= 1:
# COMPUTE THE ONE-SIDED DERIVATIVE
# Note optimization fjac(0:*,j)
fjac[0:,j] = (fp-fvec)/h[j]
else:
# COMPUTE THE TWO-SIDED DERIVATIVE
xp[ifree[j]] = xall[ifree[j]] - h[j]
mperr = 0
[status, fm] = self.call(fcn, xp, functkw)
if status < 0:
return None
# Note optimization fjac(0:*,j)
fjac[0:,j] = (fp-fm)/(2*h[j])
return fjac
# Original FORTRAN documentation
# **********
#
# subroutine qrfac
#
# this subroutine uses householder transformations with column
# pivoting (optional) to compute a qr factorization of the
# m by n matrix a. that is, qrfac determines an orthogonal
# matrix q, a permutation matrix p, and an upper trapezoidal
# matrix r with diagonal elements of nonincreasing magnitude,
# such that a*p = q*r. the householder transformation for
# column k, k = 1,2,...,min(m,n), is of the form
#
# t
# i - (1/u(k))*u*u
#
# where u has zeros in the first k-1 positions. the form of
# this transformation and the method of pivoting first
# appeared in the corresponding linpack subroutine.
#
# the subroutine statement is
#
# subroutine qrfac(m,n,a,lda,pivot,ipvt,lipvt,rdiag,acnorm,wa)
#
# where
#
# m is a positive integer input variable set to the number
# of rows of a.
#
# n is a positive integer input variable set to the number
# of columns of a.
#
# a is an m by n array. on input a contains the matrix for
# which the qr factorization is to be computed. on output
# the strict upper trapezoidal part of a contains the strict
# upper trapezoidal part of r, and the lower trapezoidal
# part of a contains a factored form of q (the non-trivial
# elements of the u vectors described above).
#
# lda is a positive integer input variable not less than m
# which specifies the leading dimension of the array a.
#
# pivot is a logical input variable. if pivot is set true,
# then column pivoting is enforced. if pivot is set false,
# then no column pivoting is done.
#
# ipvt is an integer output array of length lipvt. ipvt
# defines the permutation matrix p such that a*p = q*r.
# column j of p is column ipvt(j) of the identity matrix.
# if pivot is false, ipvt is not referenced.
#
# lipvt is a positive integer input variable. if pivot is false,
# then lipvt may be as small as 1. if pivot is true, then
# lipvt must be at least n.
#
# rdiag is an output array of length n which contains the
# diagonal elements of r.
#
# acnorm is an output array of length n which contains the
# norms of the corresponding columns of the input matrix a.
# if this information is not needed, then acnorm can coincide
# with rdiag.
#
# wa is a work array of length n. if pivot is false, then wa
# can coincide with rdiag.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm
#
# fortran-supplied ... dmax1,dsqrt,min0
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
#
# PIVOTING / PERMUTING:
#
# Upon return, A(*,*) is in standard parameter order, A(*,IPVT) is in
# permuted order.
#
# RDIAG is in permuted order.
# ACNORM is in standard parameter order.
#
#
# NOTE: in IDL the factors appear slightly differently than described
# above. The matrix A is still m x n where m >= n.
#
# The "upper" triangular matrix R is actually stored in the strict
# lower left triangle of A under the standard notation of IDL.
#
# The reflectors that generate Q are in the upper trapezoid of A upon
# output.
#
# EXAMPLE: decompose the matrix [[9.,2.,6.],[4.,8.,7.]]
# aa = [[9.,2.,6.],[4.,8.,7.]]
# mpfit_qrfac, aa, aapvt, rdiag, aanorm
# IDL> print, aa
# 1.81818* 0.181818* 0.545455*
# -8.54545+ 1.90160* 0.432573*
# IDL> print, rdiag
# -11.0000+ -7.48166+
#
# The components marked with a * are the components of the
# reflectors, and those marked with a + are components of R.
#
# To reconstruct Q and R we proceed as follows. First R.
# r = fltarr(m, n)
# for i = 0, n-1 do r(0:i,i) = aa(0:i,i) # fill in lower diag
# r(lindgen(n)*(m+1)) = rdiag
#
# Next, Q, which are composed from the reflectors. Each reflector v
# is taken from the upper trapezoid of aa, and converted to a matrix
# via (I - 2 vT . v / (v . vT)).
#
# hh = ident # identity matrix
# for i = 0, n-1 do begin
# v = aa(*,i) & if i GT 0 then v(0:i-1) = 0 # extract reflector
# hh = hh # (ident - 2*(v # v)/total(v * v)) # generate matrix
# endfor
#
# Test the result:
# IDL> print, hh # transpose(r)
# 9.00000 4.00000
# 2.00000 8.00000
# 6.00000 7.00000
#
# Note that it is usually never necessary to form the Q matrix
# explicitly, and MPFIT does not.
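# Illustrative sketch (not part of the original module): the same example
# checked against numpy's QR. Signs of the Q/R columns may differ, but the
# magnitudes of the diagonal of R match RDIAG above.
# >>> import numpy
# >>> a = numpy.array([[9., 4.], [2., 8.], [6., 7.]])   # columns [9,2,6] and [4,8,7]
# >>> q, r = numpy.linalg.qr(a)
# >>> numpy.allclose(numpy.dot(q, r), a)
# True
# >>> numpy.abs(numpy.diag(r))
# approximately [11.0, 7.48166]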
def qrfac(self, a, pivot=0):
if self.debug: print 'Entering qrfac...'
machep = self.machar.machep
sz = a.shape
m = sz[0]
n = sz[1]
# Compute the initial column norms and initialize arrays
acnorm = numpy.zeros(n, dtype=float)
for j in range(n):
acnorm[j] = self.enorm(a[:,j])
rdiag = acnorm.copy()
wa = rdiag.copy()
ipvt = numpy.arange(n)
# Reduce a to r with householder transformations
minmn = numpy.min([m,n])
for j in range(minmn):
if pivot != 0:
# Bring the column of largest norm into the pivot position
rmax = numpy.max(rdiag[j:])
kmax = (numpy.nonzero(rdiag[j:] == rmax))[0]
ct = len(kmax)
kmax = kmax + j
if ct > 0:
kmax = kmax[0]
# Exchange rows via the pivot only. Avoid actually exchanging
# the rows, in case there is lots of memory transfer. The
# exchange occurs later, within the body of MPFIT, after the
# extraneous columns of the matrix have been shed.
if kmax != j:
temp = ipvt[j] ; ipvt[j] = ipvt[kmax] ; ipvt[kmax] = temp
rdiag[kmax] = rdiag[j]
wa[kmax] = wa[j]
# Compute the householder transformation to reduce the jth
# column of A to a multiple of the jth unit vector
lj = ipvt[j]
ajj = a[j:,lj]
ajnorm = self.enorm(ajj)
if ajnorm == 0:
break
if a[j,lj] < 0:
ajnorm = -ajnorm
ajj = ajj / ajnorm
ajj[0] = ajj[0] + 1
# *** Note optimization a(j:*,j)
a[j:,lj] = ajj
# Apply the transformation to the remaining columns
# and update the norms
# NOTE to SELF: tried to optimize this by removing the loop,
# but it actually got slower. Reverted to "for" loop to keep
# it simple.
if j+1 < n:
for k in range(j+1, n):
lk = ipvt[k]
ajk = a[j:,lk]
# *** Note optimization a(j:*,lk)
# (corrected 20 Jul 2000)
if a[j,lj] != 0:
a[j:,lk] = ajk - ajj * sum(ajk*ajj)/a[j,lj]
if (pivot != 0) and (rdiag[k] != 0):
temp = a[j,lk]/rdiag[k]
rdiag[k] = rdiag[k] * numpy.sqrt(numpy.max([(1.-temp**2), 0.]))
temp = rdiag[k]/wa[k]
if (0.05*temp*temp) <= machep:
rdiag[k] = self.enorm(a[j+1:,lk])
wa[k] = rdiag[k]
rdiag[j] = -ajnorm
return [a, ipvt, rdiag, acnorm]
# Original FORTRAN documentation
# **********
#
# subroutine qrsolv
#
# given an m by n matrix a, an n by n diagonal matrix d,
# and an m-vector b, the problem is to determine an x which
# solves the system
#
# a*x = b , d*x = 0 ,
#
# in the least squares sense.
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then qrsolv expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. the system
# a*x = b, d*x = 0, is then equivalent to
#
# t t
# r*z = q *b , p *d*p*z = 0 ,
#
# where x = p*z. if this system does not have full rank,
# then a least squares solution is obtained. on output qrsolv
# also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + d*d)*p = s *s .
#
# s is computed within qrsolv and may be of separate interest.
#
# the subroutine statement is
#
# subroutine qrsolv(n,r,ldr,ipvt,diag,qtb,x,sdiag,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, d*x = 0.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
def qrsolv(self, r, ipvt, diag, qtb, sdiag):
if self.debug:
print 'Entering qrsolv...'
sz = r.shape
m = sz[0]
n = sz[1]
# copy r and (q transpose)*b to preserve input and initialize s.
# in particular, save the diagonal elements of r in x.
# Because in numpy 1.9, the return value of diagonal is read-only, see
# " In NumPy 1.9 it returns a read-only view on the original array.
# Attempting to write to the resulting array will produce an error."
#
# Jul, 23, 2015
for j in range(n):
r[j:n,j] = r[j,j:n]
x = numpy.diagonal(r).copy() # modified by Chao-Jun Feng
wa = qtb.copy()
# Eliminate the diagonal matrix d using a givens rotation
for j in range(n):
l = ipvt[j]
if diag[l] == 0:
break
sdiag[j:] = 0
sdiag[j] = diag[l]
# The transformations to eliminate the row of d modify only a
# single element of (q transpose)*b beyond the first n, which
# is initially zero.
qtbpj = 0.
for k in range(j,n):
if sdiag[k] == 0:
break
if numpy.abs(r[k,k]) < numpy.abs(sdiag[k]):
cotan = r[k,k]/sdiag[k]
sine = 0.5/numpy.sqrt(.25 + .25*cotan*cotan)
cosine = sine*cotan
else:
tang = sdiag[k]/r[k,k]
cosine = 0.5/numpy.sqrt(.25 + .25*tang*tang)
sine = cosine*tang
# Compute the modified diagonal element of r and the
# modified element of ((q transpose)*b,0).
r[k,k] = cosine*r[k,k] + sine*sdiag[k]
temp = cosine*wa[k] + sine*qtbpj
qtbpj = -sine*wa[k] + cosine*qtbpj
wa[k] = temp
# Accumulate the transformation in the row of s
if n > k+1:
temp = cosine*r[k+1:n,k] + sine*sdiag[k+1:n]
sdiag[k+1:n] = -sine*r[k+1:n,k] + cosine*sdiag[k+1:n]
r[k+1:n,k] = temp
sdiag[j] = r[j,j]
r[j,j] = x[j]
# Solve the triangular system for z. If the system is singular
# then obtain a least squares solution
nsing = n
wh = (numpy.nonzero(sdiag == 0))[0]
if len(wh) > 0:
nsing = wh[0]
wa[nsing:] = 0
if nsing >= 1:
wa[nsing-1] = wa[nsing-1]/sdiag[nsing-1] # Degenerate case
# *** Reverse loop ***
for j in range(nsing-2,-1,-1):
sum0 = sum(r[j+1:nsing,j]*wa[j+1:nsing])
wa[j] = (wa[j]-sum0)/sdiag[j]
# Permute the components of z back to components of x
x[ipvt] = wa
return (r, x, sdiag)
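# Illustrative sketch (not part of the original module): the problem qrsolv
# addresses, min ||a*x - b||^2 + ||d*x||^2, can be cross-checked with a dense
# least-squares solve of the stacked system [[a], [diag(d)]] x = [b; 0]:
# >>> import numpy
# >>> a = numpy.random.randn(5, 3); b = numpy.random.randn(5)
# >>> d = numpy.array([0.5, 1.0, 2.0])
# >>> big_a = numpy.vstack([a, numpy.diag(d)])
# >>> big_b = numpy.concatenate([b, numpy.zeros(3)])
# >>> x_ref = numpy.linalg.lstsq(big_a, big_b)[0]
# qrsolv reaches the same x from the existing QR factors of a without
# re-factorizing the augmented matrix.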
# Original FORTRAN documentation
#
# subroutine lmpar
#
# given an m by n matrix a, an n by n nonsingular diagonal
# matrix d, an m-vector b, and a positive number delta,
# the problem is to determine a value for the parameter
# par such that if x solves the system
#
# a*x = b , sqrt(par)*d*x = 0 ,
#
# in the least squares sense, and dxnorm is the euclidean
# norm of d*x, then either par is zero and
#
# (dxnorm-delta) .le. 0.1*delta ,
#
# or par is positive and
#
# abs(dxnorm-delta) .le. 0.1*delta .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then lmpar expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. on output
# lmpar also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + par*d*d)*p = s *s .
#
# s is employed within lmpar and may be of separate interest.
#
# only a few iterations are generally needed for convergence
# of the algorithm. if, however, the limit of 10 iterations
# is reached, then the output par will contain the best
# value obtained so far.
#
# the subroutine statement is
#
# subroutine lmpar(n,r,ldr,ipvt,diag,qtb,delta,par,x,sdiag,
# wa1,wa2)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# delta is a positive input variable which specifies an upper
# bound on the euclidean norm of d*x.
#
# par is a nonnegative variable. on input par contains an
# initial estimate of the levenberg-marquardt parameter.
# on output par contains the final estimate.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, sqrt(par)*d*x = 0,
# for the output par.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa1 and wa2 are work arrays of length n.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm,qrsolv
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
def lmpar(self, r, ipvt, diag, qtb, delta, x, sdiag, par=None):
if self.debug:
print 'Entering lmpar...'
dwarf = self.machar.minnum
machep = self.machar.machep
sz = r.shape
m = sz[0]
n = sz[1]
# Compute and store in x the gauss-newton direction. If the
# jacobian is rank-deficient, obtain a least-squares solution
nsing = n
wa1 = qtb.copy()
rthresh = numpy.max(numpy.abs(numpy.diagonal(r))) * machep
wh = (numpy.nonzero(numpy.abs(numpy.diagonal(r)) < rthresh))[0]
if len(wh) > 0:
nsing = wh[0]
wa1[wh[0]:] = 0
if nsing >= 1:
# *** Reverse loop ***
for j in range(nsing-1,-1,-1):
wa1[j] = wa1[j]/r[j,j]
if j-1 >= 0:
wa1[0:j] = wa1[0:j] - r[0:j,j]*wa1[j]
# Note: ipvt here is a permutation array
x[ipvt] = wa1
# Initialize the iteration counter. Evaluate the function at the
# origin, and test for acceptance of the gauss-newton direction
iter = 0
wa2 = diag * x
dxnorm = self.enorm(wa2)
fp = dxnorm - delta
if fp <= 0.1*delta:
return [r, 0., x, sdiag]
# If the jacobian is not rank deficient, the newton step provides a
# lower bound, parl, for the zero of the function. Otherwise set
# this bound to zero.
parl = 0.
if nsing >= n:
wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
wa1[0] = wa1[0] / r[0,0] # Degenerate case
for j in range(1,n): # Note "1" here, not zero
sum0 = sum(r[0:j,j]*wa1[0:j])
wa1[j] = (wa1[j] - sum0)/r[j,j]
temp = self.enorm(wa1)
parl = ((fp/delta)/temp)/temp
# Calculate an upper bound, paru, for the zero of the function
for j in range(n):
sum0 = sum(r[0:j+1,j]*qtb[0:j+1])
wa1[j] = sum0/diag[ipvt[j]]
gnorm = self.enorm(wa1)
paru = gnorm/delta
if paru == 0:
paru = dwarf/numpy.min([delta,0.1])
# If the input par lies outside of the interval (parl,paru), set
# par to the closer endpoint
par = numpy.max([par,parl])
par = numpy.min([par,paru])
if par == 0:
par = gnorm/dxnorm
# Beginning of an iteration
while(1):
iter = iter + 1
# Evaluate the function at the current value of par
if par == 0:
par = numpy.max([dwarf, paru*0.001])
temp = numpy.sqrt(par)
wa1 = temp * diag
[r, x, sdiag] = self.qrsolv(r, ipvt, wa1, qtb, sdiag)
wa2 = diag*x
dxnorm = self.enorm(wa2)
temp = fp
fp = dxnorm - delta
if (numpy.abs(fp) <= 0.1*delta) or \
((parl == 0) and (fp <= temp) and (temp < 0)) or \
(iter == 10):
break
# Compute the newton correction
wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
for j in range(n-1):
wa1[j] = wa1[j]/sdiag[j]
wa1[j+1:n] = wa1[j+1:n] - r[j+1:n,j]*wa1[j]
wa1[n-1] = wa1[n-1]/sdiag[n-1] # Degenerate case
temp = self.enorm(wa1)
parc = ((fp/delta)/temp)/temp
# Depending on the sign of the function, update parl or paru
if fp > 0:
parl = numpy.max([parl,par])
if fp < 0:
paru = numpy.min([paru,par])
# Compute an improved estimate for par
par = numpy.max([parl, par+parc])
# End of an iteration
# Termination
return [r, par, x, sdiag]
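# Illustrative sketch (not part of the original module): in normal-equation
# form, the step solved for a given Levenberg-Marquardt parameter `par` is
# >>> import numpy
# >>> def lm_step(a, b, d, par):
# ...     return numpy.linalg.solve(numpy.dot(a.T, a) + par * numpy.diag(d**2),
# ...                               numpy.dot(a.T, b))
# and lmpar adjusts `par` until the scaled step norm ||d*x|| is within about
# 10% of the trust-region radius `delta`. The QR-based path above is used
# instead of this sketch for numerical stability.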
# Procedure to tie one parameter to another.
def tie(self, p, ptied=None):
if self.debug:
print 'Entering tie...'
if ptied is None:
return
for i in range(len(ptied)):
if ptied[i] == '':
continue
cmd = 'p[' + str(i) + '] = ' + ptied[i]
exec(cmd)
return p
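# Illustrative sketch (not part of the original module): TIED expressions are
# plain Python assignments evaluated against the parameter vector, e.g.
# ptied = ['', 'p[0] * 2.', ''] forces parameter 1 to track twice parameter 0
# on every function evaluation; empty strings leave a parameter untouched.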
# Original FORTRAN documentation
# **********
#
# subroutine covar
#
# given an m by n matrix a, the problem is to determine
# the covariance matrix corresponding to a, defined as
#
# t
# inverse(a *a) .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then covar expects
# the full upper triangle of r and the permutation matrix p.
# the covariance matrix is then computed as
#
# t t
# p*inverse(r *r)*p .
#
# if a is nearly rank deficient, it may be desirable to compute
# the covariance matrix corresponding to the linearly independent
# columns of a. to define the numerical rank of a, covar uses
# the tolerance tol. if l is the largest integer such that
#
# abs(r(l,l)) .gt. tol*abs(r(1,1)) ,
#
# then covar computes the covariance matrix corresponding to
# the first l columns of r. for k greater than l, column
# and row ipvt(k) of the covariance matrix are set to zero.
#
# the subroutine statement is
#
# subroutine covar(n,r,ldr,ipvt,tol,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle must
# contain the full upper triangle of the matrix r. on output
# r contains the square symmetric covariance matrix.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# tol is a nonnegative input variable used to define the
# numerical rank of a in the manner described above.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs
#
# argonne national laboratory. minpack project. august 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
def calc_covar(self, rr, ipvt=None, tol=1.e-14):
if self.debug:
print 'Entering calc_covar...'
if numpy.rank(rr) != 2:
print 'ERROR: r must be a two-dimensional matrix'
return -1
s = rr.shape
n = s[0]
if s[0] != s[1]:
print 'ERROR: r must be a square matrix'
return -1
if ipvt is None:
ipvt = numpy.arange(n)
r = rr.copy()
r.shape = [n,n]
# Form the inverse of r in the full upper triangle of r
l = -1
tolr = tol * numpy.abs(r[0,0])
for k in range(n):
if numpy.abs(r[k,k]) <= tolr:
break
r[k,k] = 1./r[k,k]
for j in range(k):
temp = r[k,k] * r[j,k]
r[j,k] = 0.
r[0:j+1,k] = r[0:j+1,k] - temp*r[0:j+1,j]
l = k
# Form the full upper triangle of the inverse of (r transpose)*r
# in the full upper triangle of r
if l >= 0:
for k in range(l+1):
for j in range(k):
temp = r[j,k]
r[0:j+1,j] = r[0:j+1,j] + temp*r[0:j+1,k]
temp = r[k,k]
r[0:k+1,k] = temp * r[0:k+1,k]
# Form the full lower triangle of the covariance matrix
# in the strict lower triangle of r and in wa
wa = numpy.repeat([r[0,0]], n)
for j in range(n):
jj = ipvt[j]
sing = j > l
for i in range(j+1):
if sing:
r[i,j] = 0.
ii = ipvt[i]
if ii > jj:
r[ii,jj] = r[i,j]
if ii < jj:
r[jj,ii] = r[i,j]
wa[jj] = r[j,j]
# Symmetrize the covariance matrix in r
for j in range(n):
r[0:j+1,j] = r[j,0:j+1]
r[j,j] = wa[j]
return r
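# Illustrative sketch (not part of the original module): for a full-rank
# problem, calc_covar reproduces inverse(a^T a) built from the R factor
# (up to round-off):
# >>> import numpy
# >>> a = numpy.random.randn(6, 3)
# >>> r = numpy.linalg.qr(a)[1]   # upper-triangular R, no pivoting
# >>> numpy.allclose(numpy.linalg.inv(numpy.dot(a.T, a)),
# ...                numpy.linalg.inv(numpy.dot(r.T, r)))
# True
# calc_covar performs the same inversion in place from the stored triangle and
# undoes any column pivoting through ipvt.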
class machar:
def __init__(self, double=1):
if double == 0:
info = numpy.finfo(numpy.float32)
else:
info = numpy.finfo(numpy.float64)
self.machep = info.eps
self.maxnum = info.max
self.minnum = info.tiny
self.maxlog = numpy.log(self.maxnum)
self.minlog = numpy.log(self.minnum)
self.rdwarf = numpy.sqrt(self.minnum*1.5) * 10
self.rgiant = numpy.sqrt(self.maxnum) * 0.1
|
shfengcj/pydm
|
pydm/mpfit/mpfit.py
|
Python
|
gpl-2.0
| 93,480
|
[
"Gaussian"
] |
c9e8e00197e35a0f1be5bf0b931f36c8b15fb4e41561fc0c37257dedfdc6154b
|
# encoding: utf-8
"""
A base class for a configurable application.
Authors:
* Brian Granger
* Min RK
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import logging
import os
import re
import sys
from copy import deepcopy
from collections import defaultdict
from IPython.external.decorator import decorator
from IPython.config.configurable import SingletonConfigurable
from IPython.config.loader import (
KVArgParseConfigLoader, PyFileConfigLoader, Config, ArgumentError, ConfigFileNotFound, JSONFileConfigLoader
)
from IPython.utils.traitlets import (
Unicode, List, Enum, Dict, Instance, TraitError
)
from IPython.utils.importstring import import_item
from IPython.utils.text import indent, wrap_paragraphs, dedent
from IPython.utils import py3compat
from IPython.utils.py3compat import string_types, iteritems
#-----------------------------------------------------------------------------
# function for re-wrapping a helpstring
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Descriptions for the various sections
#-----------------------------------------------------------------------------
# merge flags&aliases into options
option_description = """
Arguments that take values are actually convenience aliases to full
Configurables, whose aliases are listed on the help line. For more information
on full configurables, see '--help-all'.
""".strip() # trim newlines of front and back
keyvalue_description = """
Parameters are set from command-line arguments of the form:
`--Class.trait=value`.
This line is evaluated in Python, so simple expressions are allowed, e.g.::
`--C.a='range(3)'` For setting C.a=[0,1,2].
""".strip() # trim newlines of front and back
# sys.argv can be missing, for example when python is embedded. See the docs
# for details: http://docs.python.org/2/c-api/intro.html#embedding-python
if not hasattr(sys, "argv"):
sys.argv = [""]
subcommand_description = """
Subcommands are launched as `{app} cmd [args]`. For information on using
subcommand 'cmd', do: `{app} cmd -h`.
"""
# get running program name
#-----------------------------------------------------------------------------
# Application class
#-----------------------------------------------------------------------------
@decorator
def catch_config_error(method, app, *args, **kwargs):
"""Method decorator for catching invalid config (Trait/ArgumentErrors) during init.
On a TraitError (generally caused by bad config), this will print the trait's
message, and exit the app.
For use on init methods, to prevent invoking excepthook on invalid input.
"""
try:
return method(app, *args, **kwargs)
except (TraitError, ArgumentError) as e:
app.print_help()
app.log.fatal("Bad config encountered during initialization:")
app.log.fatal(str(e))
app.log.debug("Config at the time: %s", app.config)
app.exit(1)
class ApplicationError(Exception):
pass
class LevelFormatter(logging.Formatter):
"""Formatter with additional `highlevel` record
This field is empty if log level is less than highlevel_limit,
otherwise it is formatted with self.highlevel_format.
Useful for adding 'WARNING' to warning messages,
without adding 'INFO' to info, etc.
"""
highlevel_limit = logging.WARN
highlevel_format = " %(levelname)s |"
def format(self, record):
if record.levelno >= self.highlevel_limit:
record.highlevel = self.highlevel_format % record.__dict__
else:
record.highlevel = ""
return super(LevelFormatter, self).format(record)
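# A minimal usage sketch (handler and logger names are illustrative, not part
# of this module): attach the formatter to a handler so that warnings and
# errors are prefixed with their level while info/debug lines stay clean.
#
#   handler = logging.StreamHandler()
#   handler.setFormatter(LevelFormatter("[%(name)s]%(highlevel)s %(message)s",
#                                        datefmt="%Y-%m-%d %H:%M:%S"))
#   logging.getLogger("myapp").addHandler(handler)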
class Application(SingletonConfigurable):
"""A singleton application with full configuration support."""
# The name of the application, will usually match the name of the command
# line application
name = Unicode(u'application')
# The description of the application that is printed at the beginning
# of the help.
description = Unicode(u'This is an application.')
# default section descriptions
option_description = Unicode(option_description)
keyvalue_description = Unicode(keyvalue_description)
subcommand_description = Unicode(subcommand_description)
# The usage and example string that goes at the end of the help string.
examples = Unicode()
# A sequence of Configurable subclasses whose config=True attributes will
# be exposed at the command line.
classes = List([])
# The version string of this application.
version = Unicode(u'0.0')
# the argv used to initialize the application
argv = List()
# The log level for the application
log_level = Enum((0,10,20,30,40,50,'DEBUG','INFO','WARN','ERROR','CRITICAL'),
default_value=logging.WARN,
config=True,
help="Set the log level by value or name.")
def _log_level_changed(self, name, old, new):
"""Adjust the log level when log_level is set."""
if isinstance(new, string_types):
new = getattr(logging, new)
self.log_level = new
self.log.setLevel(new)
log_datefmt = Unicode("%Y-%m-%d %H:%M:%S", config=True,
help="The date format used by logging formatters for %(asctime)s"
)
def _log_datefmt_changed(self, name, old, new):
self._log_format_changed()
log_format = Unicode("[%(name)s]%(highlevel)s %(message)s", config=True,
help="The Logging format template",
)
def _log_format_changed(self, name, old, new):
"""Change the log formatter when log_format is set."""
_log_handler = self.log.handlers[0]
_log_formatter = LevelFormatter(new, datefmt=self.log_datefmt)
_log_handler.setFormatter(_log_formatter)
log = Instance(logging.Logger)
def _log_default(self):
"""Start logging for this application.
The default is to log to stderr using a StreamHandler, if no default
handler already exists. The log level starts at logging.WARN, but this
can be adjusted by setting the ``log_level`` attribute.
"""
log = logging.getLogger(self.__class__.__name__)
log.setLevel(self.log_level)
log.propagate = False
_log = log # copied from Logger.hasHandlers() (new in Python 3.2)
while _log:
if _log.handlers:
return log
if not _log.propagate:
break
else:
_log = _log.parent
if sys.executable.endswith('pythonw.exe'):
# this should really go to a file, but file-logging is only
# hooked up in parallel applications
_log_handler = logging.StreamHandler(open(os.devnull, 'w'))
else:
_log_handler = logging.StreamHandler()
_log_formatter = LevelFormatter(self.log_format, datefmt=self.log_datefmt)
_log_handler.setFormatter(_log_formatter)
log.addHandler(_log_handler)
return log
# the alias map for configurables
aliases = Dict({'log-level' : 'Application.log_level'})
# flags for loading Configurables or store_const style flags
# flags are loaded from this dict by '--key' flags
# this must be a dict of two-tuples, the first element being the Config/dict
# and the second being the help string for the flag
flags = Dict()
def _flags_changed(self, name, old, new):
"""ensure flags dict is valid"""
for key,value in iteritems(new):
assert len(value) == 2, "Bad flag: %r:%s"%(key,value)
assert isinstance(value[0], (dict, Config)), "Bad flag: %r:%s"%(key,value)
assert isinstance(value[1], string_types), "Bad flag: %r:%s"%(key,value)
# subcommands for launching other applications
# if this is not empty, this will be a parent Application
# this must be a dict of two-tuples,
# the first element being the application class/import string
# and the second being the help string for the subcommand
subcommands = Dict()
# parse_command_line will initialize a subapp, if requested
subapp = Instance('IPython.config.application.Application', allow_none=True)
# extra command-line arguments that don't set config values
extra_args = List(Unicode)
def __init__(self, **kwargs):
SingletonConfigurable.__init__(self, **kwargs)
# Ensure my class is in self.classes, so my attributes appear in command line
# options and config files.
if self.__class__ not in self.classes:
self.classes.insert(0, self.__class__)
def _config_changed(self, name, old, new):
SingletonConfigurable._config_changed(self, name, old, new)
self.log.debug('Config changed:')
self.log.debug(repr(new))
@catch_config_error
def initialize(self, argv=None):
"""Do the basic steps to configure me.
Override in subclasses.
"""
self.parse_command_line(argv)
def start(self):
"""Start the app mainloop.
Override in subclasses.
"""
if self.subapp is not None:
return self.subapp.start()
def print_alias_help(self):
"""Print the alias part of the help."""
if not self.aliases:
return
lines = []
classdict = {}
for cls in self.classes:
# include all parents (up to, but excluding Configurable) in available names
for c in cls.mro()[:-3]:
classdict[c.__name__] = c
for alias, longname in iteritems(self.aliases):
classname, traitname = longname.split('.',1)
cls = classdict[classname]
trait = cls.class_traits(config=True)[traitname]
help = cls.class_get_trait_help(trait).splitlines()
# reformat first line
help[0] = help[0].replace(longname, alias) + ' (%s)'%longname
if len(alias) == 1:
help[0] = help[0].replace('--%s='%alias, '-%s '%alias)
lines.extend(help)
# lines.append('')
print(os.linesep.join(lines))
def print_flag_help(self):
"""Print the flag part of the help."""
if not self.flags:
return
lines = []
for m, (cfg,help) in iteritems(self.flags):
prefix = '--' if len(m) > 1 else '-'
lines.append(prefix+m)
lines.append(indent(dedent(help.strip())))
# lines.append('')
print(os.linesep.join(lines))
def print_options(self):
if not self.flags and not self.aliases:
return
lines = ['Options']
lines.append('-'*len(lines[0]))
lines.append('')
for p in wrap_paragraphs(self.option_description):
lines.append(p)
lines.append('')
print(os.linesep.join(lines))
self.print_flag_help()
self.print_alias_help()
print()
def print_subcommands(self):
"""Print the subcommand part of the help."""
if not self.subcommands:
return
lines = ["Subcommands"]
lines.append('-'*len(lines[0]))
lines.append('')
for p in wrap_paragraphs(self.subcommand_description.format(
app=self.name)):
lines.append(p)
lines.append('')
for subc, (cls, help) in iteritems(self.subcommands):
lines.append(subc)
if help:
lines.append(indent(dedent(help.strip())))
lines.append('')
print(os.linesep.join(lines))
def print_help(self, classes=False):
"""Print the help for each Configurable class in self.classes.
If classes=False (the default), only flags and aliases are printed.
"""
self.print_description()
self.print_subcommands()
self.print_options()
if classes:
if self.classes:
print("Class parameters")
print("----------------")
print()
for p in wrap_paragraphs(self.keyvalue_description):
print(p)
print()
for cls in self.classes:
cls.class_print_help()
print()
else:
print("To see all available configurables, use `--help-all`")
print()
self.print_examples()
def print_description(self):
"""Print the application description."""
for p in wrap_paragraphs(self.description):
print(p)
print()
def print_examples(self):
"""Print usage and examples.
This usage string goes at the end of the command line help string
and should contain examples of the application's usage.
"""
if self.examples:
print("Examples")
print("--------")
print()
print(indent(dedent(self.examples.strip())))
print()
def print_version(self):
"""Print the version string."""
print(self.version)
def update_config(self, config):
"""Fire the traits events when the config is updated."""
# Save a copy of the current config.
newconfig = deepcopy(self.config)
# Merge the new config into the current one.
newconfig.merge(config)
# Save the combined config as self.config, which triggers the traits
# events.
self.config = newconfig
@catch_config_error
def initialize_subcommand(self, subc, argv=None):
"""Initialize a subcommand with argv."""
subapp,help = self.subcommands.get(subc)
if isinstance(subapp, string_types):
subapp = import_item(subapp)
# clear existing instances
self.__class__.clear_instance()
# instantiate
self.subapp = subapp.instance(config=self.config)
# and initialize subapp
self.subapp.initialize(argv)
def flatten_flags(self):
"""flatten flags and aliases, so cl-args override as expected.
This prevents issues such as an alias pointing to InteractiveShell,
but a config file setting the same trait in TerminalInteractiveShell
getting inappropriate priority over the command-line arg.
Only aliases with exactly one descendent in the class list
will be promoted.
"""
# build a tree of classes in our list that inherit from a particular
# it will be a dict by parent classname of classes in our list
# that are descendents
mro_tree = defaultdict(list)
for cls in self.classes:
clsname = cls.__name__
for parent in cls.mro()[1:-3]:
# exclude cls itself and Configurable,HasTraits,object
mro_tree[parent.__name__].append(clsname)
# flatten aliases, which have the form:
# { 'alias' : 'Class.trait' }
aliases = {}
for alias, cls_trait in iteritems(self.aliases):
cls,trait = cls_trait.split('.',1)
children = mro_tree[cls]
if len(children) == 1:
# exactly one descendent, promote alias
cls = children[0]
aliases[alias] = '.'.join([cls,trait])
# flatten flags, which are of the form:
# { 'key' : ({'Cls' : {'trait' : value}}, 'help')}
flags = {}
for key, (flagdict, help) in iteritems(self.flags):
newflag = {}
for cls, subdict in iteritems(flagdict):
children = mro_tree[cls]
# exactly one descendent, promote flag section
if len(children) == 1:
cls = children[0]
newflag[cls] = subdict
flags[key] = (newflag, help)
return flags, aliases
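# Example of the promotion (class names are illustrative): with
# classes = [MyShell], where MyShell subclasses InteractiveShell, the alias
# {'c': 'InteractiveShell.code_to_run'} is rewritten to
# {'c': 'MyShell.code_to_run'}, and a flag section keyed on 'InteractiveShell'
# is re-keyed to 'MyShell', so command-line values land on the concrete class
# and are not shadowed by config-file settings on the subclass.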
@catch_config_error
def parse_command_line(self, argv=None):
"""Parse the command line arguments."""
argv = sys.argv[1:] if argv is None else argv
self.argv = [ py3compat.cast_unicode(arg) for arg in argv ]
if argv and argv[0] == 'help':
# turn `ipython help notebook` into `ipython notebook -h`
argv = argv[1:] + ['-h']
if self.subcommands and len(argv) > 0:
# we have subcommands, and one may have been specified
subc, subargv = argv[0], argv[1:]
if re.match(r'^\w(\-?\w)*$', subc) and subc in self.subcommands:
# it's a subcommand, and *not* a flag or class parameter
return self.initialize_subcommand(subc, subargv)
# Arguments after a '--' argument are for the script IPython may be
# about to run, not IPython itself. For arguments parsed here (help and
# version), we want to only search the arguments up to the first
# occurrence of '--', which we're calling interpreted_argv.
try:
interpreted_argv = argv[:argv.index('--')]
except ValueError:
interpreted_argv = argv
if any(x in interpreted_argv for x in ('-h', '--help-all', '--help')):
self.print_help('--help-all' in interpreted_argv)
self.exit(0)
if '--version' in interpreted_argv or '-V' in interpreted_argv:
self.print_version()
self.exit(0)
# flatten flags&aliases, so cl-args get appropriate priority:
flags,aliases = self.flatten_flags()
loader = KVArgParseConfigLoader(argv=argv, aliases=aliases,
flags=flags, log=self.log)
config = loader.load_config()
self.update_config(config)
# store unparsed args in extra_args
self.extra_args = loader.extra_args
@classmethod
def _load_config_files(cls, basefilename, path=None, log=None):
"""Load config files (py,json) by filename and path.
yield each config object in turn.
"""
pyloader = PyFileConfigLoader(basefilename+'.py', path=path, log=log)
jsonloader = JSONFileConfigLoader(basefilename+'.json', path=path, log=log)
config = None
for loader in [pyloader, jsonloader]:
try:
config = loader.load_config()
except ConfigFileNotFound:
pass
except Exception:
# try to get the full filename, but it will be empty in the
# unlikely event that the error was raised before filefind finished
filename = loader.full_filename or basefilename
# problem while running the file
if log:
log.error("Exception while loading config file %s",
filename, exc_info=True)
else:
if log:
log.debug("Loaded config file: %s", loader.full_filename)
if config:
yield config
raise StopIteration
@catch_config_error
def load_config_file(self, filename, path=None):
"""Load config files by filename and path."""
filename, ext = os.path.splitext(filename)
for config in self._load_config_files(filename, path=path, log=self.log):
self.update_config(config)
def generate_config_file(self):
"""generate default config file from Configurables"""
lines = ["# Configuration file for %s."%self.name]
lines.append('')
lines.append('c = get_config()')
lines.append('')
for cls in self.classes:
lines.append(cls.class_config_section())
return '\n'.join(lines)
def exit(self, exit_status=0):
self.log.debug("Exiting application: %s" % self.name)
sys.exit(exit_status)
@classmethod
def launch_instance(cls, argv=None, **kwargs):
"""Launch a global instance of this Application
If a global instance already exists, this reinitializes and starts it
"""
app = cls.instance(**kwargs)
app.initialize(argv)
app.start()
#-----------------------------------------------------------------------------
# utility functions, for convenience
#-----------------------------------------------------------------------------
def boolean_flag(name, configurable, set_help='', unset_help=''):
"""Helper for building basic --trait, --no-trait flags.
Parameters
----------
name : str
The name of the flag.
configurable : str
The 'Class.trait' string of the trait to be set/unset with the flag
set_help : unicode
help string for --name flag
unset_help : unicode
help string for --no-name flag
Returns
-------
cfg : dict
A dict with two keys: 'name', and 'no-name', for setting and unsetting
the trait, respectively.
"""
# default helpstrings
set_help = set_help or "set %s=True"%configurable
unset_help = unset_help or "set %s=False"%configurable
cls,trait = configurable.split('.')
setter = {cls : {trait : True}}
unsetter = {cls : {trait : False}}
return {name : (setter, set_help), 'no-'+name : (unsetter, unset_help)}
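# A minimal usage sketch (the trait name is illustrative):
#
#   flags = {}
#   flags.update(boolean_flag('autoindent', 'InteractiveShell.autoindent',
#                             "Turn on autoindenting.", "Turn off autoindenting."))
#
# yields both `--autoindent` and `--no-autoindent` entries suitable for
# Application.flags.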
|
WillisXChen/django-oscar
|
oscar/lib/python2.7/site-packages/IPython/config/application.py
|
Python
|
bsd-3-clause
| 21,855
|
[
"Brian"
] |
238213c07fbc5dd169c7bda8331dfa07389b9f908bbe3f0b1850ff69252b3beb
|
#The DF of a tidal stream
import copy
import numpy
import multiprocessing
import scipy
from scipy import special, interpolate, integrate
if int(scipy.__version__.split('.')[1]) < 10: #pragma: no cover
from scipy.maxentropy import logsumexp
else:
from scipy.misc import logsumexp
from galpy.orbit import Orbit
from galpy.util import bovy_coords, fast_cholesky_invert, \
bovy_conversion, multi, bovy_plot, stable_cho_factor, bovy_ars
import warnings
from galpy.util import galpyWarning
_INTERPDURINGSETUP= True
_USEINTERP= True
_USESIMPLE= True
_labelDict= {'x': r'$X$',
'y': r'$Y$',
'z': r'$Z$',
'r': r'$R$',
'phi': r'$\phi$',
'vx':r'$V_X$',
'vy':r'$V_Y$',
'vz':r'$V_Z$',
'vr':r'$V_R$',
'vt':r'$V_T$',
'll':r'$\mathrm{Galactic\ longitude\, (deg)}$',
'bb':r'$\mathrm{Galactic\ latitude\, (deg)}$',
'dist':r'$\mathrm{distance\, (kpc)}$',
'pmll':r'$\mu_l\,(\mathrm{mas\,yr}^{-1})$',
'pmbb':r'$\mu_b\,(\mathrm{mas\,yr}^{-1})$',
'vlos':r'$V_{\mathrm{los}}\,(\mathrm{km\,s}^{-1})$'}
class streamdf(object):
"""The DF of a tidal stream"""
def __init__(self,sigv,progenitor=None,pot=None,aA=None,
tdisrupt=None,sigMeanOffset=6.,leading=True,
sigangle=None,
deltaAngleTrack=None,nTrackChunks=None,nTrackIterations=None,
progIsTrack=False,
Vnorm=220.,Rnorm=8.,
R0=8.,Zsun=0.025,vsun=[-11.1,8.*30.24,7.25],
multi=None,interpTrack=_INTERPDURINGSETUP,
useInterp=_USEINTERP,nosetup=False):
"""
NAME:
__init__
PURPOSE:
Initialize the DF of a tidal stream
INPUT:
sigv - radial velocity dispersion of the progenitor
tdisrupt= (5 Gyr) time since start of disruption (natural units)
leading= (True) if True, model the leading part of the stream
if False, model the trailing part
progenitor= progenitor orbit as Orbit instance (will be re-integrated, so don't bother integrating the orbit before)
progIsTrack= (False) if True, then the progenitor (x,v) is actually the (x,v) of the stream track at zero angle separation; useful when initializing with an orbit fit; the progenitor's position will be calculated
pot= Potential instance or list thereof
aA= actionAngle instance used to convert (x,v) to actions
sigMeanOffset= (6.) offset between the mean of the frequencies
and the progenitor, in units of the largest
eigenvalue of the frequency covariance matrix
(along the largest eigenvector), should be positive;
to model the trailing part, set leading=False
sigangle= (sigv/122/[1km/s]=1.8sigv in natural coordinates)
estimate of the angle spread of the debris initially
deltaAngleTrack= (None) angle to estimate the stream track over (rad)
nTrackChunks= (floor(deltaAngleTrack/0.15)+1) number of chunks to divide the progenitor track in
nTrackIterations= Number of iterations to perform when establishing the track; each iteration starts from a previous approximation to the track in (x,v) and calculates a new track based on the deviation between the previous track and the desired track in action-angle coordinates; if not set, an appropriate value is determined based on the magnitude of the misalignment between stream and orbit, with larger numbers of iterations for larger misalignments
interpTrack= (might change), interpolate the stream track while
setting up the instance (can be done by hand by
calling self._interpolate_stream_track() and
self._interpolate_stream_track_aA())
useInterp= (might change), use interpolation by default when
calculating approximated frequencies and angles
nosetup= (False) if True, don't setup the stream track and anything
else that is expensive
multi= (None) if set, use multi-processing
Coordinate transformation inputs:
Vnorm= (220) circular velocity to normalize velocities with
Rnorm= (8) Galactocentric radius to normalize positions with
R0= (8) Galactocentric radius of the Sun (kpc)
Zsun= (0.025) Sun's height above the plane (kpc)
vsun= ([-11.1,241.92,7.25]) Sun's motion in cylindrical coordinates (vR positive away from center)
OUTPUT:
object
HISTORY:
2013-09-16 - Started - Bovy (IAS)
2013-11-25 - Started over - Bovy (IAS)
"""
self._sigv= sigv
if tdisrupt is None:
self._tdisrupt= 5./bovy_conversion.time_in_Gyr(Vnorm,Rnorm)
else:
self._tdisrupt= tdisrupt
self._sigMeanOffset= sigMeanOffset
if pot is None: #pragma: no cover
raise IOError("pot= must be set")
self._pot= pot
self._aA= aA
if not self._aA._pot == self._pot:
raise IOError("Potential in aA does not appear to be the same as given potential pot")
if (multi is True): #if set to boolean, enable cpu_count processes
self._multi= multiprocessing.cpu_count()
else:
self._multi= multi
self._progenitor_setup(progenitor,leading)
self._offset_setup(sigangle,leading,deltaAngleTrack)
# if progIsTrack, calculate the progenitor that gives a track that is approximately the given orbit
if progIsTrack:
self._setup_progIsTrack()
self._setup_coord_transform(Rnorm,Vnorm,R0,Zsun,vsun,progenitor)
#Determine the stream track
if not nosetup:
self._determine_nTrackIterations(nTrackIterations)
self._determine_stream_track(nTrackChunks)
self._useInterp= useInterp
if interpTrack or self._useInterp:
self._interpolate_stream_track()
self._interpolate_stream_track_aA()
self.calc_stream_lb()
self._determine_stream_spread()
return None
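# A minimal usage sketch (values are illustrative, roughly following the
# GD-1-like example in the galpy documentation; not part of this module):
#
# >>> from galpy.potential import LogarithmicHaloPotential
# >>> from galpy.actionAngle import actionAngleIsochroneApprox
# >>> from galpy.orbit import Orbit
# >>> lp= LogarithmicHaloPotential(normalize=1.,q=0.9)
# >>> aAI= actionAngleIsochroneApprox(pot=lp,b=0.8)
# >>> prog= Orbit([1.56148083,0.35081535,-1.15481504,0.88719443,-0.47713334,0.12019596])
# >>> sdf= streamdf(0.365/220.,progenitor=prog,pot=lp,aA=aAI,leading=True,
# ...               nTrackChunks=11,tdisrupt=4.5/bovy_conversion.time_in_Gyr(220.,8.))
#
# after which sdf.plotTrack(...) and sdf.misalignment() operate on the
# computed leading-arm track.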
def _progenitor_setup(self,progenitor,leading):
"""The part of the setup relating to the progenitor's orbit"""
#Progenitor orbit: Calculate actions, frequencies, and angles for the progenitor
self._progenitor= progenitor() #call to get new Orbit
# Make sure we do not use physical coordinates
self._progenitor.turn_physical_off()
acfs= self._aA.actionsFreqsAngles(self._progenitor,maxn=3,
_firstFlip=(not leading))
self._progenitor_jr= acfs[0][0]
self._progenitor_lz= acfs[1][0]
self._progenitor_jz= acfs[2][0]
self._progenitor_Omegar= acfs[3]
self._progenitor_Omegaphi= acfs[4]
self._progenitor_Omegaz= acfs[5]
self._progenitor_Omega= numpy.array([acfs[3],acfs[4],acfs[5]]).reshape(3)
self._progenitor_angler= acfs[6]
self._progenitor_anglephi= acfs[7]
self._progenitor_anglez= acfs[8]
self._progenitor_angle= numpy.array([acfs[6],acfs[7],acfs[8]]).reshape(3)
#Calculate dO/dJ Jacobian at the progenitor
self._dOdJp= calcaAJac(self._progenitor._orb.vxvv,
self._aA,dxv=None,dOdJ=True,
_initacfs=acfs)
self._dOdJpEig= numpy.linalg.eig(self._dOdJp)
return None
def _offset_setup(self,sigangle,leading,deltaAngleTrack):
"""The part of the setup related to calculating the stream/progenitor offset"""
#From the progenitor orbit, determine the sigmas in J and angle
self._sigjr= (self._progenitor.rap()-self._progenitor.rperi())/numpy.pi*self._sigv
self._siglz= self._progenitor.rperi()*self._sigv
self._sigjz= 2.*self._progenitor.zmax()/numpy.pi*self._sigv
#Estimate the frequency covariance matrix from a diagonal J matrix x dOdJ
self._sigjmatrix= numpy.diag([self._sigjr**2.,
self._siglz**2.,
self._sigjz**2.])
self._sigomatrix= numpy.dot(self._dOdJp,
numpy.dot(self._sigjmatrix,self._dOdJp.T))
#Estimate angle spread as the ratio of the largest to the middle eigenvalue
self._sigomatrixEig= numpy.linalg.eig(self._sigomatrix)
self._sigomatrixEigsortIndx= numpy.argsort(self._sigomatrixEig[0])
self._sortedSigOEig= sorted(self._sigomatrixEig[0])
if sigangle is None:
self._sigangle= self._sigv*1.8
else:
self._sigangle= sigangle
self._sigangle2= self._sigangle**2.
self._lnsigangle= numpy.log(self._sigangle)
#Estimate the frequency mean as lying along the direction of the largest eigenvalue
self._dsigomeanProgDirection= self._sigomatrixEig[1][:,numpy.argmax(self._sigomatrixEig[0])]
self._progenitor_Omega_along_dOmega= \
numpy.dot(self._progenitor_Omega,self._dsigomeanProgDirection)
#Make sure we are modeling the correct part of the stream
self._leading= leading
self._sigMeanSign= 1.
if self._leading and self._progenitor_Omega_along_dOmega < 0.:
self._sigMeanSign= -1.
elif not self._leading and self._progenitor_Omega_along_dOmega > 0.:
self._sigMeanSign= -1.
self._progenitor_Omega_along_dOmega*= self._sigMeanSign
self._sigomean= self._progenitor_Omega\
+self._sigMeanOffset*self._sigMeanSign\
*numpy.sqrt(numpy.amax(self._sigomatrixEig[0]))\
*self._dsigomeanProgDirection
#numpy.dot(self._dOdJp,
# numpy.array([self._sigjr,self._siglz,self._sigjz]))
self._dsigomeanProg= self._sigomean-self._progenitor_Omega
self._meandO= self._sigMeanOffset\
*numpy.sqrt(numpy.amax(self._sigomatrixEig[0]))
#Store cholesky of sigomatrix for fast evaluation
self._sigomatrixNorm=\
numpy.sqrt(numpy.sum(self._sigomatrix**2.))
self._sigomatrixinv, self._sigomatrixLogdet= \
fast_cholesky_invert(self._sigomatrix/self._sigomatrixNorm,
tiny=10.**-15.,logdet=True)
self._sigomatrixinv/= self._sigomatrixNorm
deltaAngleTrackLim = (self._sigMeanOffset+4.) * numpy.sqrt(
self._sortedSigOEig[2]) * self._tdisrupt
if (deltaAngleTrack is None):
deltaAngleTrack = deltaAngleTrackLim
else:
if (deltaAngleTrack > deltaAngleTrackLim):
warnings.warn("WARNING: angle range large compared to plausible value.", galpyWarning)
self._deltaAngleTrack= deltaAngleTrack
return None
def _setup_coord_transform(self,Rnorm,Vnorm,R0,Zsun,vsun,progenitor):
#Set the coordinate-transformation parameters; check that these do not conflict with those in the progenitor orbit object; need to use the original, since this objects _progenitor has physical turned off
if progenitor._roSet \
and (numpy.fabs(Rnorm-progenitor._orb._ro) > 10.**-.8 \
or numpy.fabs(R0-progenitor._orb._ro) > 10.**-8.):
warnings.warn("Warning: progenitor's ro does not agree with streamdf's Rnorm and R0; this may have unexpected consequences when projecting into observables", galpyWarning)
if progenitor._voSet \
and numpy.fabs(Vnorm-progenitor._orb._vo) > 10.**-8.:
warnings.warn("Warning: progenitor's vo does not agree with streamdf's Vnorm; this may have unexpected consequences when projecting into observables", galpyWarning)
if (progenitor._roSet or progenitor._voSet) \
and numpy.fabs(Zsun-progenitor._orb._zo) > 10.**-8.:
warnings.warn("Warning: progenitor's zo does not agree with streamdf's Zsun; this may have unexpected consequences when projecting into observables", galpyWarning)
if (progenitor._roSet or progenitor._voSet) \
and numpy.any(numpy.fabs(vsun-numpy.array([0.,Vnorm,0.])\
-progenitor._orb._solarmotion) > 10.**-8.):
warnings.warn("Warning: progenitor's solarmotion does not agree with streamdf's vsun (after accounting for Vnorm); this may have unexpected consequences when projecting into observables", galpyWarning)
self._Vnorm= Vnorm
self._Rnorm= Rnorm
self._R0= R0
self._Zsun= Zsun
self._vsun= vsun
return None
def _setup_progIsTrack(self):
"""If progIsTrack, the progenitor orbit that was passed to the
streamdf initialization is the track at zero angle separation;
this routine computes an actual progenitor position that gives
the desired track given the parameters of the streamdf"""
# We need to flip the sign of the offset, to go to the progenitor
self._sigMeanSign*= -1.
# Use _determine_stream_track_single to calculate the track-progenitor
# offset at zero angle separation
prog_stream_offset=\
_determine_stream_track_single(self._aA,
self._progenitor,
0., #time = 0
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
self.meanOmega,
0.) #angle = 0
# Setup the new progenitor orbit
progenitor= Orbit(prog_stream_offset[3])
# Flip the offset sign again
self._sigMeanSign*= -1.
# Now re-do the previous setup
self._progenitor_setup(progenitor,self._leading)
self._offset_setup(self._sigangle,self._leading,
self._deltaAngleTrack)
return None
def misalignment(self,isotropic=False):
"""
NAME:
misalignment
PURPOSE:
calculate the misalignment between the progenitor's frequency
and the direction along which the stream disrupts
INPUT:
isotropic= (False), if True, return the misalignment assuming an isotropic action distribution
OUTPUT:
misalignment in degree
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
if isotropic:
dODir= self._dOdJpEig[1][:,numpy.argmax(numpy.fabs(self._dOdJpEig[0]))]
else:
dODir= self._dsigomeanProgDirection
out= numpy.arccos(numpy.sum(self._progenitor_Omega*dODir)/numpy.sqrt(numpy.sum(self._progenitor_Omega**2.)))/numpy.pi*180.
if out > 90.: return out-180.
else: return out
def freqEigvalRatio(self,isotropic=False):
"""
NAME:
freqEigvalRatio
PURPOSE:
calculate the ratio between the largest and 2nd-to-largest (in abs)
eigenvalue of sqrt(dO/dJ^T V_J dO/dJ)
(if this is big, a 1D stream will form)
INPUT:
isotropic= (False), if True, return the ratio assuming an isotropic action distribution (i.e., just of dO/dJ)
OUTPUT:
ratio between eigenvalues of |dO / dJ|
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
if isotropic:
sortedEig= sorted(numpy.fabs(self._dOdJpEig[0]))
return sortedEig[2]/sortedEig[1]
else:
return numpy.sqrt(self._sortedSigOEig)[2]\
/numpy.sqrt(self._sortedSigOEig)[1]
def estimateTdisrupt(self,deltaAngle):
"""
NAME:
estimateTdisrupt
PURPOSE:
estimate the time of disruption
INPUT:
deltaAngle- spread in angle since disruption
OUTPUT:
time in natural units
HISTORY:
2013-11-27 - Written - Bovy (IAS)
"""
return deltaAngle\
/numpy.sqrt(numpy.sum(self._dsigomeanProg**2.))
############################STREAM TRACK FUNCTIONS#############################
def plotTrack(self,d1='x',d2='z',interp=True,spread=0,simple=_USESIMPLE,
*args,**kwargs):
"""
NAME:
plotTrack
PURPOSE:
plot the stream track
INPUT:
d1= plot this on the X axis ('x','y','z','R','phi','vx','vy','vz','vR','vt','ll','bb','dist','pmll','pmbb','vlos')
d2= plot this on the Y axis (same list as for d1)
interp= (True) if True, use the interpolated stream track
spread= (0) if int > 0, also plot the spread around the track as spread x sigma
scaleToPhysical= (False), if True, plot positions in kpc and velocities in km/s
simple= (False), if True, use a simple estimate for the spread in perpendicular angle
bovy_plot.bovy_plot args and kwargs
OUTPUT:
plot to output device
HISTORY:
2013-12-09 - Written - Bovy (IAS)
"""
if not hasattr(self,'_ObsTrackLB') and \
(d1.lower() == 'll' or d1.lower() == 'bb'
or d1.lower() == 'dist' or d1.lower() == 'pmll'
or d1.lower() == 'pmbb' or d1.lower() == 'vlos'
or d2.lower() == 'll' or d2.lower() == 'bb'
or d2.lower() == 'dist' or d2.lower() == 'pmll'
or d2.lower() == 'pmbb' or d2.lower() == 'vlos'):
self.calc_stream_lb()
phys= kwargs.pop('scaleToPhysical',False)
tx= self._parse_track_dim(d1,interp=interp,phys=phys)
ty= self._parse_track_dim(d2,interp=interp,phys=phys)
bovy_plot.bovy_plot(tx,ty,*args,
xlabel=_labelDict[d1.lower()],
ylabel=_labelDict[d2.lower()],
**kwargs)
if spread:
addx, addy= self._parse_track_spread(d1,d2,interp=interp,phys=phys,
simple=simple)
if ('ls' in kwargs and kwargs['ls'] == 'none') \
or ('linestyle' in kwargs \
and kwargs['linestyle'] == 'none'):
kwargs.pop('ls',None)
kwargs.pop('linestyle',None)
spreadls= 'none'
else:
spreadls= '-.'
spreadmarker= kwargs.pop('marker',None)
spreadcolor= kwargs.pop('color',None)
spreadlw= kwargs.pop('lw',1.)
bovy_plot.bovy_plot(tx+spread*addx,ty+spread*addy,ls=spreadls,
marker=spreadmarker,color=spreadcolor,
lw=spreadlw,
overplot=True)
bovy_plot.bovy_plot(tx-spread*addx,ty-spread*addy,ls=spreadls,
marker=spreadmarker,color=spreadcolor,
lw=spreadlw,
overplot=True)
return None
def plotProgenitor(self,d1='x',d2='z',*args,**kwargs):
"""
NAME:
plotProgenitor
PURPOSE:
plot the progenitor orbit
INPUT:
d1= plot this on the X axis ('x','y','z','R','phi','vx','vy','vz','vR','vt','ll','bb','dist','pmll','pmbb','vlos')
d2= plot this on the Y axis (same list as for d1)
scaleToPhysical= (False), if True, plot positions in kpc and velocities in km/s
bovy_plot.bovy_plot args and kwargs
OUTPUT:
plot to output device
HISTORY:
2013-12-09 - Written - Bovy (IAS)
"""
tts= self._progenitor._orb.t[self._progenitor._orb.t \
< self._trackts[self._nTrackChunks-1]]
obs= [self._R0,0.,self._Zsun]
obs.extend(self._vsun)
phys= kwargs.pop('scaleToPhysical',False)
tx= self._parse_progenitor_dim(d1,tts,ro=self._Rnorm,vo=self._Vnorm,
obs=obs,phys=phys)
ty= self._parse_progenitor_dim(d2,tts,ro=self._Rnorm,vo=self._Vnorm,
obs=obs,phys=phys)
bovy_plot.bovy_plot(tx,ty,*args,
xlabel=_labelDict[d1.lower()],
ylabel=_labelDict[d2.lower()],
**kwargs)
return None
def _parse_track_dim(self,d1,interp=True,phys=False):
"""Parse the dimension to plot the stream track for"""
if interp: interpStr= 'interpolated'
else: interpStr= ''
if d1.lower() == 'x':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,0]
elif d1.lower() == 'y':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,1]
elif d1.lower() == 'z':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,2]
elif d1.lower() == 'r':
tx= self.__dict__['_%sObsTrack' % interpStr][:,0]
elif d1.lower() == 'phi':
tx= self.__dict__['_%sObsTrack' % interpStr][:,5]
elif d1.lower() == 'vx':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,3]
elif d1.lower() == 'vy':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,4]
elif d1.lower() == 'vz':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,5]
elif d1.lower() == 'vr':
tx= self.__dict__['_%sObsTrack' % interpStr][:,1]
elif d1.lower() == 'vt':
tx= self.__dict__['_%sObsTrack' % interpStr][:,2]
elif d1.lower() == 'll':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,0]
elif d1.lower() == 'bb':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,1]
elif d1.lower() == 'dist':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,2]
elif d1.lower() == 'pmll':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,4]
elif d1.lower() == 'pmbb':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,5]
elif d1.lower() == 'vlos':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,3]
if phys and (d1.lower() == 'x' or d1.lower() == 'y' \
or d1.lower() == 'z' or d1.lower() == 'r'):
tx= copy.copy(tx)
tx*= self._Rnorm
if phys and (d1.lower() == 'vx' or d1.lower() == 'vy' \
or d1.lower() == 'vz' or d1.lower() == 'vr' \
or d1.lower() == 'vt'):
tx= copy.copy(tx)
tx*= self._Vnorm
return tx
def _parse_progenitor_dim(self,d1,ts,ro=None,vo=None,obs=None,
phys=False):
"""Parse the dimension to plot the progenitor orbit for"""
if d1.lower() == 'x':
tx= self._progenitor.x(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'y':
tx= self._progenitor.y(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'z':
tx= self._progenitor.z(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'r':
tx= self._progenitor.R(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'phi':
tx= self._progenitor.phi(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'vx':
tx= self._progenitor.vx(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'vy':
tx= self._progenitor.vy(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'vz':
tx= self._progenitor.vz(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'vr':
tx= self._progenitor.vR(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'vt':
tx= self._progenitor.vT(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'll':
tx= self._progenitor.ll(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'bb':
tx= self._progenitor.bb(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'dist':
tx= self._progenitor.dist(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'pmll':
tx= self._progenitor.pmll(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'pmbb':
tx= self._progenitor.pmbb(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'vlos':
tx= self._progenitor.vlos(ts,ro=ro,vo=vo,obs=obs)
if phys and (d1.lower() == 'x' or d1.lower() == 'y' \
or d1.lower() == 'z' or d1.lower() == 'r'):
tx= copy.copy(tx)
tx*= self._Rnorm
if phys and (d1.lower() == 'vx' or d1.lower() == 'vy' \
or d1.lower() == 'vz' or d1.lower() == 'vr' \
or d1.lower() == 'vt'):
tx= copy.copy(tx)
tx*= self._Vnorm
return tx
def _parse_track_spread(self,d1,d2,interp=True,phys=False,
simple=_USESIMPLE):
"""Determine the spread around the track"""
if not hasattr(self,'_allErrCovs'):
self._determine_stream_spread(simple=simple)
okaySpreadR= ['r','vr','vt','z','vz','phi']
okaySpreadXY= ['x','y','z','vx','vy','vz']
okaySpreadLB= ['ll','bb','dist','vlos','pmll','pmbb']
#Determine which coordinate system we're in
coord= [False,False,False] #R, XY, LB
if d1.lower() in okaySpreadR and d2.lower() in okaySpreadR:
coord[0]= True
elif d1.lower() in okaySpreadXY and d2.lower() in okaySpreadXY:
coord[1]= True
elif d1.lower() in okaySpreadLB and d2.lower() in okaySpreadLB:
coord[2]= True
else:
raise NotImplementedError("plotting the spread for coordinates from different systems not implemented yet ...")
#Get the right 2D Jacobian
indxDict= {}
indxDict['r']= 0
indxDict['vr']= 1
indxDict['vt']= 2
indxDict['z']= 3
indxDict['vz']= 4
indxDict['phi']= 5
indxDictXY= {}
indxDictXY['x']= 0
indxDictXY['y']= 1
indxDictXY['z']= 2
indxDictXY['vx']= 3
indxDictXY['vy']= 4
indxDictXY['vz']= 5
indxDictLB= {}
indxDictLB['ll']= 0
indxDictLB['bb']= 1
indxDictLB['dist']= 2
indxDictLB['vlos']= 3
indxDictLB['pmll']= 4
indxDictLB['pmbb']= 5
if coord[0]:
relevantCov= self._allErrCovs
relevantDict= indxDict
if phys:#apply scale factors
tcov= copy.copy(relevantCov)
scaleFac= numpy.array([self._Rnorm,self._Vnorm,self._Vnorm,
self._Rnorm,self._Vnorm,1.])
tcov*= numpy.tile(scaleFac,(6,1))
tcov*= numpy.tile(scaleFac,(6,1)).T
relevantCov= tcov
elif coord[1]:
relevantCov= self._allErrCovsXY
relevantDict= indxDictXY
if phys:#apply scale factors
tcov= copy.copy(relevantCov)
scaleFac= numpy.array([self._Rnorm,self._Rnorm,self._Rnorm,
self._Vnorm,self._Vnorm,self._Vnorm])
tcov*= numpy.tile(scaleFac,(6,1))
tcov*= numpy.tile(scaleFac,(6,1)).T
relevantCov= tcov
elif coord[2]:
relevantCov= self._allErrCovsLBUnscaled
relevantDict= indxDictLB
indx0= numpy.array([[relevantDict[d1.lower()],relevantDict[d1.lower()]],
[relevantDict[d2.lower()],relevantDict[d2.lower()]]])
indx1= numpy.array([[relevantDict[d1.lower()],relevantDict[d2.lower()]],
[relevantDict[d1.lower()],relevantDict[d2.lower()]]])
cov= relevantCov[:,indx0,indx1] #cov contains all nTrackChunks covs
if not interp:
out= numpy.empty((self._nTrackChunks,2))
eigDir= numpy.array([1.,0.])
for ii in range(self._nTrackChunks):
covEig= numpy.linalg.eig(cov[ii])
minIndx= numpy.argmin(covEig[0])
minEigvec= covEig[1][:,minIndx] #this is the direction of the transverse spread
if numpy.sum(minEigvec*eigDir) < 0.: minEigvec*= -1. #Keep them pointing in the same direction
out[ii]= minEigvec*numpy.sqrt(covEig[0][minIndx])
eigDir= minEigvec
else:
#We slerp the minor eigenvector and interpolate the eigenvalue
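            #slerp: interpolate the unit eigenvector along the great circle between
            #consecutive track chunks (weights sin((1-t)*Omega)/sin(Omega) and
            #sin(t*Omega)/sin(Omega)), while the eigenvalue is spline-interpolated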
#First store all of the eigenvectors on the track
allEigval= numpy.empty(self._nTrackChunks)
allEigvec= numpy.empty((self._nTrackChunks,2))
eigDir= numpy.array([1.,0.])
for ii in range(self._nTrackChunks):
covEig= numpy.linalg.eig(cov[ii])
minIndx= numpy.argmin(covEig[0])
minEigvec= covEig[1][:,minIndx] #this is the direction of the transverse spread
if numpy.sum(minEigvec*eigDir) < 0.: minEigvec*= -1. #Keep them pointing in the same direction
allEigval[ii]= numpy.sqrt(covEig[0][minIndx])
allEigvec[ii]= minEigvec
eigDir= minEigvec
#Now interpolate where needed
interpEigval=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
allEigval,k=3)
interpolatedEigval= interpEigval(self._interpolatedThetasTrack)
#Interpolate in chunks
interpolatedEigvec= numpy.empty((len(self._interpolatedThetasTrack),
2))
for ii in range(self._nTrackChunks-1):
slerpOmega= numpy.arccos(numpy.sum(allEigvec[ii]*allEigvec[ii+1]))
slerpts= (self._interpolatedThetasTrack-self._thetasTrack[ii])/\
(self._thetasTrack[ii+1]-self._thetasTrack[ii])
slerpIndx= (slerpts >= 0.)*(slerpts <= 1.)
for jj in range(2):
interpolatedEigvec[slerpIndx,jj]=\
(numpy.sin((1-slerpts[slerpIndx])*slerpOmega)*allEigvec[ii,jj]
+numpy.sin(slerpts[slerpIndx]*slerpOmega)*allEigvec[ii+1,jj])/numpy.sin(slerpOmega)
out= numpy.tile(interpolatedEigval.T,(2,1)).T*interpolatedEigvec
if coord[2]: #if LB, undo rescalings that were applied before
out[:,0]*= self._ErrCovsLBScale[relevantDict[d1.lower()]]
out[:,1]*= self._ErrCovsLBScale[relevantDict[d2.lower()]]
return (out[:,0],out[:,1])
def plotCompareTrackAAModel(self,**kwargs):
"""
NAME:
plotCompareTrackAAModel
PURPOSE:
plot the comparison between the underlying model's dOmega_perp vs. dangle_r (line) and the track in (x,v)'s dOmega_perp vs. dangle_r (dots; explicitly calculating the track's action-angle coordinates)
INPUT:
bovy_plot.bovy_plot kwargs
OUTPUT:
plot
HISTORY:
2014-08-27 - Written - Bovy (IAS)
"""
#First calculate the model
model_adiff= (self._ObsTrackAA[:,3:]-self._progenitor_angle)[:,0]\
*self._sigMeanSign
model_operp= numpy.dot(self._ObsTrackAA[:,:3]-self._progenitor_Omega,
self._dsigomeanProgDirection)\
*self._sigMeanSign
#Then calculate the track's frequency-angle coordinates
if self._multi is None:
aatrack= numpy.empty((self._nTrackChunks,6))
for ii in range(self._nTrackChunks):
aatrack[ii]= self._aA.actionsFreqsAngles(Orbit(self._ObsTrack[ii,:]),
maxn=3)[3:]
else:
aatrack= numpy.reshape(\
multi.parallel_map(
(lambda x: self._aA.actionsFreqsAngles(Orbit(self._ObsTrack[x,:]), maxn=3)[3:]),
range(self._nTrackChunks),
numcores=numpy.amin([self._nTrackChunks,
multiprocessing.cpu_count(),
self._multi])),(self._nTrackChunks,6))
track_adiff= (aatrack[:,3:]-self._progenitor_angle)[:,0]\
*self._sigMeanSign
track_operp= numpy.dot(aatrack[:,:3]-self._progenitor_Omega,
self._dsigomeanProgDirection)\
*self._sigMeanSign
overplot= kwargs.pop('overplot',False)
yrange= kwargs.pop('yrange',
[0.,numpy.amax(numpy.hstack((model_operp,track_operp)))*1.1])
xlabel= kwargs.pop('xlabel',r'$\Delta \theta_R$')
ylabel= kwargs.pop('ylabel',r'$\Delta \Omega_\parallel$')
bovy_plot.bovy_plot(model_adiff,model_operp,'k-',overplot=overplot,
xlabel=xlabel,ylabel=ylabel,yrange=yrange,**kwargs)
bovy_plot.bovy_plot(track_adiff,track_operp,'ko',overplot=True,
**kwargs)
return None
def _determine_nTrackIterations(self,nTrackIterations):
"""Determine a good value for nTrackIterations based on the misalignment between stream and orbit; just based on some rough experience for now"""
if not nTrackIterations is None:
self.nTrackIterations= nTrackIterations
return None
if numpy.fabs(self.misalignment()) < 1.:
self.nTrackIterations= 0
elif numpy.fabs(self.misalignment()) >= 1. \
and numpy.fabs(self.misalignment()) < 3.:
self.nTrackIterations= 1
elif numpy.fabs(self.misalignment()) >= 3.:
self.nTrackIterations= 2
return None
def _determine_stream_track(self,nTrackChunks):
"""Determine the track of the stream in real space"""
#Determine how much orbital time is necessary for the progenitor's orbit to cover the stream
if nTrackChunks is None:
#default is floor(self._deltaAngleTrack/0.15)+1
self._nTrackChunks= int(numpy.floor(self._deltaAngleTrack/0.15))+1
else:
self._nTrackChunks= nTrackChunks
dt= self._deltaAngleTrack\
/self._progenitor_Omega_along_dOmega
self._trackts= numpy.linspace(0.,2*dt,2*self._nTrackChunks-1) #to be sure that we cover it
#Instantiate an auxiliaryTrack, which is an Orbit instance at the mean frequency of the stream, and zero angle separation wrt the progenitor; prog_stream_offset is the offset between this track and the progenitor at zero angle
prog_stream_offset=\
_determine_stream_track_single(self._aA,
self._progenitor,
0., #time = 0
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
self.meanOmega,
0.) #angle = 0
auxiliaryTrack= Orbit(prog_stream_offset[3])
if dt < 0.:
            self._trackts= numpy.linspace(0.,-2.*dt,2*self._nTrackChunks-1)
#Flip velocities before integrating
auxiliaryTrack= auxiliaryTrack.flip()
auxiliaryTrack.integrate(self._trackts,self._pot)
if dt < 0.:
#Flip velocities again
auxiliaryTrack._orb.orbit[:,1]= -auxiliaryTrack._orb.orbit[:,1]
auxiliaryTrack._orb.orbit[:,2]= -auxiliaryTrack._orb.orbit[:,2]
auxiliaryTrack._orb.orbit[:,4]= -auxiliaryTrack._orb.orbit[:,4]
#Calculate the actions, frequencies, and angle for this auxiliary orbit
acfs= self._aA.actionsFreqs(auxiliaryTrack(0.),maxn=3)
auxiliary_Omega= numpy.array([acfs[3],acfs[4],acfs[5]]).reshape(3\
)
auxiliary_Omega_along_dOmega= \
numpy.dot(auxiliary_Omega,self._dsigomeanProgDirection)
#Now calculate the actions, frequencies, and angles + Jacobian for each chunk
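        #For each chunk we store the actions/frequencies/angles along the auxiliary track
        #(9 numbers), the 6x6 Jacobian d(O,theta)/d(R,vR,vT,z,vz,phi) and its inverse,
        #the track point in configuration space (ObsTrack) and in frequency-angle space
        #(ObsTrackAA), and the determinant |dO/dJ| that enters the DF normalization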
allAcfsTrack= numpy.empty((self._nTrackChunks,9))
alljacsTrack= numpy.empty((self._nTrackChunks,6,6))
allinvjacsTrack= numpy.empty((self._nTrackChunks,6,6))
thetasTrack= numpy.linspace(0.,self._deltaAngleTrack,
self._nTrackChunks)
ObsTrack= numpy.empty((self._nTrackChunks,6))
ObsTrackAA= numpy.empty((self._nTrackChunks,6))
detdOdJps= numpy.empty((self._nTrackChunks))
if self._multi is None:
for ii in range(self._nTrackChunks):
multiOut= _determine_stream_track_single(self._aA,
auxiliaryTrack,
self._trackts[ii]*numpy.fabs(self._progenitor_Omega_along_dOmega/auxiliary_Omega_along_dOmega), #this factor accounts for the difference in frequency between the progenitor and the auxiliary track
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
self.meanOmega,
thetasTrack[ii])
allAcfsTrack[ii,:]= multiOut[0]
alljacsTrack[ii,:,:]= multiOut[1]
allinvjacsTrack[ii,:,:]= multiOut[2]
ObsTrack[ii,:]= multiOut[3]
ObsTrackAA[ii,:]= multiOut[4]
detdOdJps[ii]= multiOut[5]
else:
multiOut= multi.parallel_map(\
(lambda x: _determine_stream_track_single(self._aA,auxiliaryTrack,
self._trackts[x]*numpy.fabs(self._progenitor_Omega_along_dOmega/auxiliary_Omega_along_dOmega),
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
self.meanOmega,
thetasTrack[x])),
range(self._nTrackChunks),
numcores=numpy.amin([self._nTrackChunks,
multiprocessing.cpu_count(),
self._multi]))
for ii in range(self._nTrackChunks):
allAcfsTrack[ii,:]= multiOut[ii][0]
alljacsTrack[ii,:,:]= multiOut[ii][1]
allinvjacsTrack[ii,:,:]= multiOut[ii][2]
ObsTrack[ii,:]= multiOut[ii][3]
ObsTrackAA[ii,:]= multiOut[ii][4]
detdOdJps[ii]= multiOut[ii][5]
#Repeat the track calculation using the previous track, to get closer to it
for nn in range(self.nTrackIterations):
if self._multi is None:
for ii in range(self._nTrackChunks):
multiOut= _determine_stream_track_single(self._aA,
Orbit(ObsTrack[ii,:]),
0.,
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
self.meanOmega,
thetasTrack[ii])
allAcfsTrack[ii,:]= multiOut[0]
alljacsTrack[ii,:,:]= multiOut[1]
allinvjacsTrack[ii,:,:]= multiOut[2]
ObsTrack[ii,:]= multiOut[3]
ObsTrackAA[ii,:]= multiOut[4]
detdOdJps[ii]= multiOut[5]
else:
multiOut= multi.parallel_map(\
(lambda x: _determine_stream_track_single(self._aA,Orbit(ObsTrack[x,:]),0.,
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
self.meanOmega,
thetasTrack[x])),
range(self._nTrackChunks),
numcores=numpy.amin([self._nTrackChunks,
multiprocessing.cpu_count(),
self._multi]))
for ii in range(self._nTrackChunks):
allAcfsTrack[ii,:]= multiOut[ii][0]
alljacsTrack[ii,:,:]= multiOut[ii][1]
allinvjacsTrack[ii,:,:]= multiOut[ii][2]
ObsTrack[ii,:]= multiOut[ii][3]
ObsTrackAA[ii,:]= multiOut[ii][4]
detdOdJps[ii]= multiOut[ii][5]
#Store the track
self._thetasTrack= thetasTrack
self._ObsTrack= ObsTrack
self._ObsTrackAA= ObsTrackAA
self._allAcfsTrack= allAcfsTrack
self._alljacsTrack= alljacsTrack
self._allinvjacsTrack= allinvjacsTrack
self._detdOdJps= detdOdJps
self._meandetdOdJp= numpy.mean(self._detdOdJps)
self._logmeandetdOdJp= numpy.log(self._meandetdOdJp)
#Also calculate _ObsTrackXY in XYZ,vXYZ coordinates
self._ObsTrackXY= numpy.empty_like(self._ObsTrack)
TrackX= self._ObsTrack[:,0]*numpy.cos(self._ObsTrack[:,5])
TrackY= self._ObsTrack[:,0]*numpy.sin(self._ObsTrack[:,5])
TrackZ= self._ObsTrack[:,3]
TrackvX, TrackvY, TrackvZ=\
bovy_coords.cyl_to_rect_vec(self._ObsTrack[:,1],
self._ObsTrack[:,2],
self._ObsTrack[:,4],
self._ObsTrack[:,5])
self._ObsTrackXY[:,0]= TrackX
self._ObsTrackXY[:,1]= TrackY
self._ObsTrackXY[:,2]= TrackZ
self._ObsTrackXY[:,3]= TrackvX
self._ObsTrackXY[:,4]= TrackvY
self._ObsTrackXY[:,5]= TrackvZ
return None
def _determine_stream_spread(self,simple=_USESIMPLE):
"""Determine the spread around the stream track, just sets matrices that describe the covariances"""
allErrCovs= numpy.empty((self._nTrackChunks,6,6))
if self._multi is None:
for ii in range(self._nTrackChunks):
allErrCovs[ii]= _determine_stream_spread_single(self._sigomatrixEig,
self._thetasTrack[ii],
self.sigOmega,
lambda y: self.sigangledAngle(y,simple=simple),
self._allinvjacsTrack[ii])
else:
multiOut= multi.parallel_map(\
(lambda x: _determine_stream_spread_single(self._sigomatrixEig,
self._thetasTrack[x],
self.sigOmega,
lambda y: self.sigangledAngle(y,simple=simple),
self._allinvjacsTrack[x])),
range(self._nTrackChunks),
numcores=numpy.amin([self._nTrackChunks,
multiprocessing.cpu_count(),
self._multi]))
for ii in range(self._nTrackChunks):
allErrCovs[ii]= multiOut[ii]
self._allErrCovs= allErrCovs
#Also propagate to XYZ coordinates
allErrCovsXY= numpy.empty_like(self._allErrCovs)
allErrCovsEigvalXY= numpy.empty((len(self._thetasTrack),6))
allErrCovsEigvecXY= numpy.empty_like(self._allErrCovs)
eigDir= numpy.array([numpy.array([1.,0.,0.,0.,0.,0.]) for ii in range(6)])
for ii in range(self._nTrackChunks):
tjac= bovy_coords.cyl_to_rect_jac(*self._ObsTrack[ii])
allErrCovsXY[ii]=\
numpy.dot(tjac,numpy.dot(self._allErrCovs[ii],tjac.T))
#Eigen decomposition for interpolation
teig= numpy.linalg.eig(allErrCovsXY[ii])
#Sort them to match them up later
sortIndx= numpy.argsort(teig[0])
allErrCovsEigvalXY[ii]= teig[0][sortIndx]
#Make sure the eigenvectors point in the same direction
for jj in range(6):
if numpy.sum(eigDir[jj]*teig[1][:,sortIndx[jj]]) < 0.:
teig[1][:,sortIndx[jj]]*= -1.
eigDir[jj]= teig[1][:,sortIndx[jj]]
allErrCovsEigvecXY[ii]= teig[1][:,sortIndx]
self._allErrCovsXY= allErrCovsXY
#Interpolate the allErrCovsXY covariance matrices along the interpolated track
#Interpolate the eigenvalues
interpAllErrCovsEigvalXY=\
[interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
allErrCovsEigvalXY[:,ii],
k=3) for ii in range(6)]
#Now build the interpolated allErrCovsXY using slerp
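        #Each interpolated covariance is re-assembled as V diag(lambda) V^T from the
        #slerped eigenvectors and the spline-interpolated eigenvalues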
interpolatedAllErrCovsXY= numpy.empty((len(self._interpolatedThetasTrack),
6,6))
interpolatedEigval=\
numpy.array([interpAllErrCovsEigvalXY[ii](self._interpolatedThetasTrack) for ii in range(6)]) #6,ninterp
#Interpolate in chunks
interpolatedEigvec= numpy.empty((len(self._interpolatedThetasTrack),
6,6))
for ii in range(self._nTrackChunks-1):
slerpOmegas=\
[numpy.arccos(numpy.sum(allErrCovsEigvecXY[ii,:,jj]*allErrCovsEigvecXY[ii+1,:,jj])) for jj in range(6)]
slerpts= (self._interpolatedThetasTrack-self._thetasTrack[ii])/\
(self._thetasTrack[ii+1]-self._thetasTrack[ii])
slerpIndx= (slerpts >= 0.)*(slerpts <= 1.)
for jj in range(6):
for kk in range(6):
interpolatedEigvec[slerpIndx,kk,jj]=\
(numpy.sin((1-slerpts[slerpIndx])*slerpOmegas[jj])*allErrCovsEigvecXY[ii,kk,jj]
+numpy.sin(slerpts[slerpIndx]*slerpOmegas[jj])*allErrCovsEigvecXY[ii+1,kk,jj])/numpy.sin(slerpOmegas[jj])
for ii in range(len(self._interpolatedThetasTrack)):
interpolatedAllErrCovsXY[ii]=\
numpy.dot(interpolatedEigvec[ii],
numpy.dot(numpy.diag(interpolatedEigval[:,ii]),
interpolatedEigvec[ii].T))
self._interpolatedAllErrCovsXY= interpolatedAllErrCovsXY
#Also interpolate in l and b coordinates
self._determine_stream_spreadLB(simple=simple)
return None
def _determine_stream_spreadLB(self,simple=_USESIMPLE,
Rnorm=None,Vnorm=None,
R0=None,Zsun=None,vsun=None):
"""Determine the spread in the stream in observable coordinates"""
if not hasattr(self,'_allErrCovs'):
self._determine_stream_spread(simple=simple)
if Rnorm is None:
Rnorm= self._Rnorm
if Vnorm is None:
Vnorm= self._Vnorm
if R0 is None:
R0= self._R0
if Zsun is None:
Zsun= self._Zsun
if vsun is None:
vsun= self._vsun
allErrCovsLB= numpy.empty_like(self._allErrCovs)
obs= [R0,0.,Zsun]
obs.extend(vsun)
obskwargs= {}
obskwargs['ro']= Rnorm
obskwargs['vo']= Vnorm
obskwargs['obs']= obs
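        #Scale factors that bring each of (l,b,dist,vlos,pmll,pmbb) to order unity, so the
        #covariances stored below (_allErrCovsLBUnscaled) stay well-conditioned; the scaling
        #is undone wherever the spread is used (e.g., _parse_track_spread, gaussApprox)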
self._ErrCovsLBScale= [180.,90.,
self._progenitor.dist(**obskwargs),
numpy.fabs(self._progenitor.vlos(**obskwargs)),
numpy.sqrt(self._progenitor.pmll(**obskwargs)**2.
+self._progenitor.pmbb(**obskwargs)**2.),
numpy.sqrt(self._progenitor.pmll(**obskwargs)**2.
+self._progenitor.pmbb(**obskwargs)**2.)]
allErrCovsEigvalLB= numpy.empty((len(self._thetasTrack),6))
allErrCovsEigvecLB= numpy.empty_like(self._allErrCovs)
eigDir= numpy.array([numpy.array([1.,0.,0.,0.,0.,0.]) for ii in range(6)])
for ii in range(self._nTrackChunks):
tjacXY= bovy_coords.galcenrect_to_XYZ_jac(*self._ObsTrackXY[ii])
tjacLB= bovy_coords.lbd_to_XYZ_jac(*self._ObsTrackLB[ii],
degree=True)
tjacLB[:3,:]/= Rnorm
tjacLB[3:,:]/= Vnorm
for jj in range(6):
tjacLB[:,jj]*= self._ErrCovsLBScale[jj]
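            #Chain rule: tjacLB= d(XYZ,vxvyvz)/d(scaled l,b,dist,...) and
            #tjacXY= d(XYZ,vxvyvz)/d(Galactocentric x,y,z,...), so inv(tjacLB).tjacXY maps
            #Galactocentric offsets onto (scaled) observable offsets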
tjac= numpy.dot(numpy.linalg.inv(tjacLB),tjacXY)
allErrCovsLB[ii]=\
numpy.dot(tjac,numpy.dot(self._allErrCovsXY[ii],tjac.T))
#Eigen decomposition for interpolation
teig= numpy.linalg.eig(allErrCovsLB[ii])
#Sort them to match them up later
sortIndx= numpy.argsort(teig[0])
allErrCovsEigvalLB[ii]= teig[0][sortIndx]
#Make sure the eigenvectors point in the same direction
for jj in range(6):
if numpy.sum(eigDir[jj]*teig[1][:,sortIndx[jj]]) < 0.:
teig[1][:,sortIndx[jj]]*= -1.
eigDir[jj]= teig[1][:,sortIndx[jj]]
allErrCovsEigvecLB[ii]= teig[1][:,sortIndx]
self._allErrCovsLBUnscaled= allErrCovsLB
#Interpolate the allErrCovsLB covariance matrices along the interpolated track
#Interpolate the eigenvalues
interpAllErrCovsEigvalLB=\
[interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
allErrCovsEigvalLB[:,ii],
k=3) for ii in range(6)]
#Now build the interpolated allErrCovsXY using slerp
interpolatedAllErrCovsLB= numpy.empty((len(self._interpolatedThetasTrack),
6,6))
interpolatedEigval=\
numpy.array([interpAllErrCovsEigvalLB[ii](self._interpolatedThetasTrack) for ii in range(6)]) #6,ninterp
#Interpolate in chunks
interpolatedEigvec= numpy.empty((len(self._interpolatedThetasTrack),
6,6))
for ii in range(self._nTrackChunks-1):
slerpOmegas=\
[numpy.arccos(numpy.sum(allErrCovsEigvecLB[ii,:,jj]*allErrCovsEigvecLB[ii+1,:,jj])) for jj in range(6)]
slerpts= (self._interpolatedThetasTrack-self._thetasTrack[ii])/\
(self._thetasTrack[ii+1]-self._thetasTrack[ii])
slerpIndx= (slerpts >= 0.)*(slerpts <= 1.)
for jj in range(6):
for kk in range(6):
interpolatedEigvec[slerpIndx,kk,jj]=\
(numpy.sin((1-slerpts[slerpIndx])*slerpOmegas[jj])*allErrCovsEigvecLB[ii,kk,jj]
+numpy.sin(slerpts[slerpIndx]*slerpOmegas[jj])*allErrCovsEigvecLB[ii+1,kk,jj])/numpy.sin(slerpOmegas[jj])
for ii in range(len(self._interpolatedThetasTrack)):
interpolatedAllErrCovsLB[ii]=\
numpy.dot(interpolatedEigvec[ii],
numpy.dot(numpy.diag(interpolatedEigval[:,ii]),
interpolatedEigvec[ii].T))
self._interpolatedAllErrCovsLBUnscaled= interpolatedAllErrCovsLB
#Also calculate the (l,b,..) -> (X,Y,..) Jacobian at all of the interpolated and not interpolated points
trackLogDetJacLB= numpy.empty_like(self._thetasTrack)
interpolatedTrackLogDetJacLB=\
numpy.empty_like(self._interpolatedThetasTrack)
for ii in range(self._nTrackChunks):
tjacLB= bovy_coords.lbd_to_XYZ_jac(*self._ObsTrackLB[ii],
degree=True)
trackLogDetJacLB[ii]= numpy.log(numpy.linalg.det(tjacLB))
self._trackLogDetJacLB= trackLogDetJacLB
for ii in range(len(self._interpolatedThetasTrack)):
tjacLB=\
bovy_coords.lbd_to_XYZ_jac(*self._interpolatedObsTrackLB[ii],
degree=True)
interpolatedTrackLogDetJacLB[ii]=\
numpy.log(numpy.linalg.det(tjacLB))
self._interpolatedTrackLogDetJacLB= interpolatedTrackLogDetJacLB
return None
def _interpolate_stream_track(self):
"""Build interpolations of the stream track"""
if hasattr(self,'_interpolatedThetasTrack'):
return None #Already did this
TrackX= self._ObsTrack[:,0]*numpy.cos(self._ObsTrack[:,5])
TrackY= self._ObsTrack[:,0]*numpy.sin(self._ObsTrack[:,5])
TrackZ= self._ObsTrack[:,3]
TrackvX, TrackvY, TrackvZ=\
bovy_coords.cyl_to_rect_vec(self._ObsTrack[:,1],
self._ObsTrack[:,2],
self._ObsTrack[:,4],
self._ObsTrack[:,5])
#Interpolate
self._interpTrackX=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackX,k=3)
self._interpTrackY=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackY,k=3)
self._interpTrackZ=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackZ,k=3)
self._interpTrackvX=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackvX,k=3)
self._interpTrackvY=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackvY,k=3)
self._interpTrackvZ=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackvZ,k=3)
#Now store an interpolated version of the stream track
self._interpolatedThetasTrack=\
numpy.linspace(0.,self._deltaAngleTrack,1001)
self._interpolatedObsTrackXY= numpy.empty((len(self._interpolatedThetasTrack),6))
self._interpolatedObsTrackXY[:,0]=\
self._interpTrackX(self._interpolatedThetasTrack)
self._interpolatedObsTrackXY[:,1]=\
self._interpTrackY(self._interpolatedThetasTrack)
self._interpolatedObsTrackXY[:,2]=\
self._interpTrackZ(self._interpolatedThetasTrack)
self._interpolatedObsTrackXY[:,3]=\
self._interpTrackvX(self._interpolatedThetasTrack)
self._interpolatedObsTrackXY[:,4]=\
self._interpTrackvY(self._interpolatedThetasTrack)
self._interpolatedObsTrackXY[:,5]=\
self._interpTrackvZ(self._interpolatedThetasTrack)
#Also in cylindrical coordinates
self._interpolatedObsTrack= \
numpy.empty((len(self._interpolatedThetasTrack),6))
tR,tphi,tZ= bovy_coords.rect_to_cyl(self._interpolatedObsTrackXY[:,0],
self._interpolatedObsTrackXY[:,1],
self._interpolatedObsTrackXY[:,2])
tvR,tvT,tvZ=\
bovy_coords.rect_to_cyl_vec(self._interpolatedObsTrackXY[:,3],
self._interpolatedObsTrackXY[:,4],
self._interpolatedObsTrackXY[:,5],
tR,tphi,tZ,cyl=True)
self._interpolatedObsTrack[:,0]= tR
self._interpolatedObsTrack[:,1]= tvR
self._interpolatedObsTrack[:,2]= tvT
self._interpolatedObsTrack[:,3]= tZ
self._interpolatedObsTrack[:,4]= tvZ
self._interpolatedObsTrack[:,5]= tphi
return None
def _interpolate_stream_track_aA(self):
"""Build interpolations of the stream track in action-angle coordinates"""
if hasattr(self,'_interpolatedObsTrackAA'):
return None #Already did this
#Calculate 1D meanOmega on a fine grid in angle and interpolate
if not hasattr(self,'_interpolatedThetasTrack'):
self._interpolate_stream_track()
dmOs= numpy.array([self.meanOmega(da,oned=True)
for da in self._interpolatedThetasTrack])
self._interpTrackAAdmeanOmegaOneD=\
interpolate.InterpolatedUnivariateSpline(\
self._interpolatedThetasTrack,dmOs,k=3)
#Build the interpolated AA
self._interpolatedObsTrackAA=\
numpy.empty((len(self._interpolatedThetasTrack),6))
for ii in range(len(self._interpolatedThetasTrack)):
self._interpolatedObsTrackAA[ii,:3]=\
self._progenitor_Omega+dmOs[ii]*self._dsigomeanProgDirection\
*self._sigMeanSign
self._interpolatedObsTrackAA[ii,3:]=\
self._progenitor_angle+self._interpolatedThetasTrack[ii]\
*self._dsigomeanProgDirection*self._sigMeanSign
self._interpolatedObsTrackAA[ii,3:]=\
numpy.mod(self._interpolatedObsTrackAA[ii,3:],2.*numpy.pi)
return None
def calc_stream_lb(self,
Vnorm=None,Rnorm=None,
R0=None,Zsun=None,vsun=None):
"""
NAME:
calc_stream_lb
PURPOSE:
convert the stream track to observational coordinates and store
INPUT:
Coordinate transformation inputs (all default to the instance-wide
values):
Vnorm= circular velocity to normalize velocities with
Rnorm= Galactocentric radius to normalize positions with
R0= Galactocentric radius of the Sun (kpc)
Zsun= Sun's height above the plane (kpc)
vsun= Sun's motion in cylindrical coordinates (vR positive away from center)
OUTPUT:
(none)
HISTORY:
2013-12-02 - Written - Bovy (IAS)
"""
if Vnorm is None:
Vnorm= self._Vnorm
if Rnorm is None:
Rnorm= self._Rnorm
if R0 is None:
R0= self._R0
if Zsun is None:
Zsun= self._Zsun
if vsun is None:
vsun= self._vsun
self._ObsTrackLB= numpy.empty_like(self._ObsTrack)
XYZ= bovy_coords.galcencyl_to_XYZ(self._ObsTrack[:,0]*Rnorm,
self._ObsTrack[:,5],
self._ObsTrack[:,3]*Rnorm,
Xsun=R0,Zsun=Zsun)
vXYZ= bovy_coords.galcencyl_to_vxvyvz(self._ObsTrack[:,1]*Vnorm,
self._ObsTrack[:,2]*Vnorm,
self._ObsTrack[:,4]*Vnorm,
self._ObsTrack[:,5],
vsun=vsun)
slbd=bovy_coords.XYZ_to_lbd(XYZ[0],XYZ[1],XYZ[2],
degree=True)
svlbd= bovy_coords.vxvyvz_to_vrpmllpmbb(vXYZ[0],vXYZ[1],vXYZ[2],
slbd[:,0],slbd[:,1],slbd[:,2],
degree=True)
self._ObsTrackLB[:,0]= slbd[:,0]
self._ObsTrackLB[:,1]= slbd[:,1]
self._ObsTrackLB[:,2]= slbd[:,2]
self._ObsTrackLB[:,3]= svlbd[:,0]
self._ObsTrackLB[:,4]= svlbd[:,1]
self._ObsTrackLB[:,5]= svlbd[:,2]
if hasattr(self,'_interpolatedObsTrackXY'):
#Do the same for the interpolated track
self._interpolatedObsTrackLB=\
numpy.empty_like(self._interpolatedObsTrackXY)
XYZ=\
bovy_coords.galcenrect_to_XYZ(\
self._interpolatedObsTrackXY[:,0]*Rnorm,
self._interpolatedObsTrackXY[:,1]*Rnorm,
self._interpolatedObsTrackXY[:,2]*Rnorm,
Xsun=R0,Zsun=Zsun)
vXYZ=\
bovy_coords.galcenrect_to_vxvyvz(\
self._interpolatedObsTrackXY[:,3]*Vnorm,
self._interpolatedObsTrackXY[:,4]*Vnorm,
self._interpolatedObsTrackXY[:,5]*Vnorm,
vsun=vsun)
slbd=bovy_coords.XYZ_to_lbd(XYZ[0],XYZ[1],XYZ[2],
degree=True)
svlbd= bovy_coords.vxvyvz_to_vrpmllpmbb(vXYZ[0],vXYZ[1],vXYZ[2],
slbd[:,0],slbd[:,1],
slbd[:,2],
degree=True)
self._interpolatedObsTrackLB[:,0]= slbd[:,0]
self._interpolatedObsTrackLB[:,1]= slbd[:,1]
self._interpolatedObsTrackLB[:,2]= slbd[:,2]
self._interpolatedObsTrackLB[:,3]= svlbd[:,0]
self._interpolatedObsTrackLB[:,4]= svlbd[:,1]
self._interpolatedObsTrackLB[:,5]= svlbd[:,2]
if hasattr(self,'_allErrCovsLBUnscaled'):
#Re-calculate this
self._determine_stream_spreadLB(simple=_USESIMPLE,
Vnorm=Vnorm,Rnorm=Rnorm,
R0=R0,Zsun=Zsun,vsun=vsun)
return None
def _find_closest_trackpoint(self,R,vR,vT,z,vz,phi,interp=True,xy=False,
usev=False):
"""For backward compatibility"""
return self.find_closest_trackpoint(R,vR,vT,z,vz,phi,
interp=interp,xy=xy,
usev=usev)
def find_closest_trackpoint(self,R,vR,vT,z,vz,phi,interp=True,xy=False,
usev=False):
"""
NAME:
find_closest_trackpoint
PURPOSE:
find the closest point on the stream track to a given point
INPUT:
R,vR,vT,z,vz,phi - phase-space coordinates of the given point
interp= (True), if True, return the index of the interpolated track
xy= (False) if True, input is X,Y,Z,vX,vY,vZ in Galactocentric rectangular coordinates; if xy, some coordinates may be missing (given as None) and they will not be used
usev= (False) if True, also use velocities to find the closest point
OUTPUT:
index into the track of the closest track point
HISTORY:
2013-12-04 - Written - Bovy (IAS)
"""
if xy:
X= R
Y= vR
Z= vT
else:
X= R*numpy.cos(phi)
Y= R*numpy.sin(phi)
Z= z
if xy and usev:
vX= z
vY= vz
vZ= phi
elif usev:
vX= vR*numpy.cos(phi)-vT*numpy.sin(phi)
vY= vR*numpy.sin(phi)+vT*numpy.cos(phi)
vZ= vz
present= [not X is None,not Y is None,not Z is None]
if usev: present.extend([not vX is None,not vY is None,not vZ is None])
present= numpy.array(present,dtype='float')
if X is None: X= 0.
if Y is None: Y= 0.
if Z is None: Z= 0.
if usev and vX is None: vX= 0.
if usev and vY is None: vY= 0.
if usev and vZ is None: vZ= 0.
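        #Weighted squared distance to the track: coordinates passed as None get zero
        #weight through 'present' and therefore do not contribute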
if interp:
dist2= present[0]*(X-self._interpolatedObsTrackXY[:,0])**2.\
+present[1]*(Y-self._interpolatedObsTrackXY[:,1])**2.\
+present[2]*(Z-self._interpolatedObsTrackXY[:,2])**2.
if usev:
dist2+= present[3]*(vX-self._interpolatedObsTrackXY[:,3])**2.\
+present[4]*(vY-self._interpolatedObsTrackXY[:,4])**2.\
+present[5]*(vZ-self._interpolatedObsTrackXY[:,5])**2.
else:
dist2= present[0]*(X-self._ObsTrackXY[:,0])**2.\
+present[1]*(Y-self._ObsTrackXY[:,1])**2.\
+present[2]*(Z-self._ObsTrackXY[:,2])**2.
if usev:
dist2+= present[3]*(vX-self._ObsTrackXY[:,3])**2.\
+present[4]*(vY-self._ObsTrackXY[:,4])**2.\
+present[5]*(vZ-self._ObsTrackXY[:,5])**2.
return numpy.argmin(dist2)
def _find_closest_trackpointLB(self,l,b,D,vlos,pmll,pmbb,interp=True,
usev=False):
return self.find_closest_trackpointLB(l,b,D,vlos,pmll,pmbb,
interp=interp,
usev=usev)
def find_closest_trackpointLB(self,l,b,D,vlos,pmll,pmbb,interp=True,
usev=False):
"""
NAME:
find_closest_trackpointLB
PURPOSE:
find the closest point on the stream track to a given point in (l,b,...) coordinates
INPUT:
           l,b,D,vlos,pmll,pmbb - coordinates in (deg,deg,kpc,km/s,mas/yr,mas/yr)
interp= (True) if True, return the closest index on the interpolated track
usev= (False) if True, also use the velocity components (default is to only use the positions)
OUTPUT:
index of closest track point on the interpolated or not-interpolated track
HISTORY:
           2013-12-17 - Written - Bovy (IAS)
"""
if interp:
nTrackPoints= len(self._interpolatedThetasTrack)
else:
nTrackPoints= len(self._thetasTrack)
if l is None:
l= 0.
trackL= numpy.zeros(nTrackPoints)
elif interp:
trackL= self._interpolatedObsTrackLB[:,0]
else:
trackL= self._ObsTrackLB[:,0]
if b is None:
b= 0.
trackB= numpy.zeros(nTrackPoints)
elif interp:
trackB= self._interpolatedObsTrackLB[:,1]
else:
trackB= self._ObsTrackLB[:,1]
if D is None:
D= 1.
trackD= numpy.ones(nTrackPoints)
elif interp:
trackD= self._interpolatedObsTrackLB[:,2]
else:
trackD= self._ObsTrackLB[:,2]
if usev:
if vlos is None:
vlos= 0.
trackVlos= numpy.zeros(nTrackPoints)
elif interp:
trackVlos= self._interpolatedObsTrackLB[:,3]
else:
trackVlos= self._ObsTrackLB[:,3]
if pmll is None:
pmll= 0.
trackPmll= numpy.zeros(nTrackPoints)
elif interp:
trackPmll= self._interpolatedObsTrackLB[:,4]
else:
trackPmll= self._ObsTrackLB[:,4]
if pmbb is None:
pmbb= 0.
trackPmbb= numpy.zeros(nTrackPoints)
elif interp:
trackPmbb= self._interpolatedObsTrackLB[:,5]
else:
trackPmbb= self._ObsTrackLB[:,5]
#Calculate rectangular coordinates
XYZ= bovy_coords.lbd_to_XYZ(l,b,D,degree=True)
trackXYZ= bovy_coords.lbd_to_XYZ(trackL,trackB,trackD,degree=True)
if usev:
vxvyvz= bovy_coords.vrpmllpmbb_to_vxvyvz(vlos,pmll,pmbb,
XYZ[0],XYZ[1],XYZ[2],
XYZ=True)
trackvxvyvz= bovy_coords.vrpmllpmbb_to_vxvyvz(trackVlos,trackPmll,
trackPmbb,
trackXYZ[:,0],
trackXYZ[:,1],
trackXYZ[:,2],
XYZ=True)
#Calculate distance
dist2= (XYZ[0]-trackXYZ[:,0])**2.\
+(XYZ[1]-trackXYZ[:,1])**2.\
+(XYZ[2]-trackXYZ[:,2])**2.
if usev:
dist2+= (vxvyvz[0]-trackvxvyvz[:,0])**2.\
+(vxvyvz[1]-trackvxvyvz[:,1])**2.\
+(vxvyvz[2]-trackvxvyvz[:,2])**2.
return numpy.argmin(dist2)
def _find_closest_trackpointaA(self,Or,Op,Oz,ar,ap,az,interp=True):
"""
NAME:
_find_closest_trackpointaA
PURPOSE:
find the closest point on the stream track to a given point in
frequency-angle coordinates
INPUT:
Or,Op,Oz,ar,ap,az - phase-space coordinates of the given point
interp= (True), if True, return the index of the interpolated track
OUTPUT:
index into the track of the closest track point
HISTORY:
2013-12-22 - Written - Bovy (IAS)
"""
#Calculate angle offset along the stream parallel to the stream track
angle= numpy.hstack((ar,ap,az))
da= angle-self._progenitor_angle
dapar= self._sigMeanSign*numpy.sum(da*self._dsigomeanProgDirection)
if interp:
dist= numpy.fabs(dapar-self._interpolatedThetasTrack)
else:
dist= numpy.fabs(dapar-self._thetasTrack)
return numpy.argmin(dist)
#########DISTRIBUTION AS A FUNCTION OF ANGLE ALONG THE STREAM##################
def meanOmega(self,dangle,oned=False):
"""
NAME:
meanOmega
PURPOSE:
calculate the mean frequency as a function of angle, assuming a uniform time distribution up to a maximum time
INPUT:
dangle - angle offset
oned= (False) if True, return the 1D offset from the progenitor (along the direction of disruption)
OUTPUT:
mean Omega
HISTORY:
2013-12-01 - Written - Bovy (IAS)
"""
dOmin= dangle/self._tdisrupt
meandO= self._meandO
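        #Mean frequency offset along the main eigendirection for a Gaussian truncated
        #below at dOmin= dangle/tdisrupt: only stars with dO >= dOmin can have reached
        #this angle within the disruption time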
dO1D= ((numpy.sqrt(2./numpy.pi)*numpy.sqrt(self._sortedSigOEig[2])\
*numpy.exp(-0.5*(meandO-dOmin)**2.\
/self._sortedSigOEig[2])/
(1.+special.erf((meandO-dOmin)\
/numpy.sqrt(2.*self._sortedSigOEig[2]))))\
+meandO)
if oned: return dO1D
else:
return self._progenitor_Omega+dO1D*self._dsigomeanProgDirection\
*self._sigMeanSign
def sigOmega(self,dangle):
"""
NAME:
           sigOmega
PURPOSE:
calculate the 1D sigma in frequency as a function of angle, assuming a uniform time distribution up to a maximum time
INPUT:
dangle - angle offset
OUTPUT:
sigma Omega
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
dOmin= dangle/self._tdisrupt
meandO= self._meandO
sO1D2= ((numpy.sqrt(2./numpy.pi)*numpy.sqrt(self._sortedSigOEig[2])\
*(meandO+dOmin)\
*numpy.exp(-0.5*(meandO-dOmin)**2.\
/self._sortedSigOEig[2])/
(1.+special.erf((meandO-dOmin)\
/numpy.sqrt(2.*self._sortedSigOEig[2]))))\
+meandO**2.+self._sortedSigOEig[2])
mO= self.meanOmega(dangle,oned=True)
return numpy.sqrt(sO1D2-mO**2.)
def ptdAngle(self,t,dangle):
"""
NAME:
           ptdAngle
PURPOSE:
return the probability of a given stripping time at a given angle along the stream
INPUT:
t - stripping time
dangle - angle offset along the stream
OUTPUT:
p(td|dangle)
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
if isinstance(t,(int,float,numpy.float32,numpy.float64)):
t= numpy.array([t])
out= numpy.zeros(len(t))
        #Only strictly positive stripping times shorter than the disruption time contribute
        tindx= (t > 0.)*(t < self._tdisrupt)
        if not numpy.any(tindx):
            return out
        dO= dangle/t[tindx]
        #p(t|a) = \int dO p(O,t|a) = \int dO p(t|O,a) p(O|a) = \int dO delta (t-a/O)p(O|a) = O*2/a p(O|a); p(O|a) = \int dt p(a|O,t) p(O)p(t) = 1/O p(O)
        out[tindx]=\
            dO**2./dangle*numpy.exp(-0.5*(dO-self._meandO)**2.\
                                         /self._sortedSigOEig[2])/\
            numpy.sqrt(self._sortedSigOEig[2])
return out
def meantdAngle(self,dangle):
"""
NAME:
meantdAngle
PURPOSE:
calculate the mean stripping time at a given angle
INPUT:
dangle - angle offset along the stream
OUTPUT:
mean stripping time at this dangle
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
Tlow= dangle/(self._meandO+3.*numpy.sqrt(self._sortedSigOEig[2]))
Thigh= dangle/(self._meandO-3.*numpy.sqrt(self._sortedSigOEig[2]))
num= integrate.quad(lambda x: x*self.ptdAngle(x,dangle),
Tlow,Thigh)[0]
denom= integrate.quad(self.ptdAngle,Tlow,Thigh,(dangle,))[0]
if denom == 0.: return self._tdisrupt
elif numpy.isnan(denom): return 0.
else: return num/denom
def sigtdAngle(self,dangle):
"""
NAME:
sigtdAngle
PURPOSE:
calculate the dispersion in the stripping times at a given angle
INPUT:
dangle - angle offset along the stream
OUTPUT:
dispersion in the stripping times at this angle
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
Tlow= dangle/(self._meandO+3.*numpy.sqrt(self._sortedSigOEig[2]))
Thigh= dangle/(self._meandO-3.*numpy.sqrt(self._sortedSigOEig[2]))
numsig2= integrate.quad(lambda x: x**2.*self.ptdAngle(x,dangle),
Tlow,Thigh)[0]
nummean= integrate.quad(lambda x: x*self.ptdAngle(x,dangle),
Tlow,Thigh)[0]
denom= integrate.quad(self.ptdAngle,Tlow,Thigh,(dangle,))[0]
if denom == 0.: return numpy.nan
else: return numpy.sqrt(numsig2/denom-(nummean/denom)**2.)
def pangledAngle(self,angleperp,dangle,smallest=False):
"""
NAME:
pangledAngle
PURPOSE:
return the probability of a given perpendicular angle at a given angle along the stream
INPUT:
angleperp - perpendicular angle
dangle - angle offset along the stream
smallest= (False) calculate for smallest eigenvalue direction rather than for middle
OUTPUT:
p(angle_perp|dangle)
HISTORY:
2013-12-06 - Written - Bovy (IAS)
"""
if isinstance(angleperp,(int,float,numpy.float32,numpy.float64)):
angleperp= numpy.array([angleperp])
out= numpy.zeros(len(angleperp))
out= numpy.array([\
integrate.quad(self._pangledAnglet,0.,self._tdisrupt,
(ap,dangle,smallest))[0] for ap in angleperp])
return out
def meanangledAngle(self,dangle,smallest=False):
"""
NAME:
meanangledAngle
PURPOSE:
calculate the mean perpendicular angle at a given angle
INPUT:
dangle - angle offset along the stream
smallest= (False) calculate for smallest eigenvalue direction rather than for middle
OUTPUT:
mean perpendicular angle
HISTORY:
2013-12-06 - Written - Bovy (IAS)
"""
if smallest: eigIndx= 0
else: eigIndx= 1
aplow= numpy.amax([numpy.sqrt(self._sortedSigOEig[eigIndx])\
*self._tdisrupt*5.,
self._sigangle])
num= integrate.quad(lambda x: x*self.pangledAngle(x,dangle,smallest),
aplow,-aplow)[0]
denom= integrate.quad(self.pangledAngle,aplow,-aplow,
(dangle,smallest))[0]
if denom == 0.: return numpy.nan
else: return num/denom
def sigangledAngle(self,dangle,assumeZeroMean=True,smallest=False,
simple=False):
"""
NAME:
sigangledAngle
PURPOSE:
calculate the dispersion in the perpendicular angle at a given angle
INPUT:
dangle - angle offset along the stream
assumeZeroMean= (True) if True, assume that the mean is zero (should be)
smallest= (False) calculate for smallest eigenvalue direction rather than for middle
simple= (False), if True, return an even simpler estimate
OUTPUT:
dispersion in the perpendicular angle at this angle
HISTORY:
2013-12-06 - Written - Bovy (IAS)
"""
if smallest: eigIndx= 0
else: eigIndx= 1
if simple:
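            #Quick estimate: perpendicular-angle variance = initial angle spread squared
            #plus the perpendicular frequency spread times the mean stripping time squared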
dt= self.meantdAngle(dangle)
return numpy.sqrt(self._sigangle2
+self._sortedSigOEig[eigIndx]*dt**2.)
aplow= numpy.amax([numpy.sqrt(self._sortedSigOEig[eigIndx])*self._tdisrupt*5.,
self._sigangle])
numsig2= integrate.quad(lambda x: x**2.*self.pangledAngle(x,dangle),
aplow,-aplow)[0]
if not assumeZeroMean:
nummean= integrate.quad(lambda x: x*self.pangledAngle(x,dangle),
aplow,-aplow)[0]
else:
nummean= 0.
denom= integrate.quad(self.pangledAngle,aplow,-aplow,(dangle,))[0]
if denom == 0.: return numpy.nan
else: return numpy.sqrt(numsig2/denom-(nummean/denom)**2.)
def _pangledAnglet(self,t,angleperp,dangle,smallest):
"""p(angle_perp|angle_par,time)"""
if smallest: eigIndx= 0
else: eigIndx= 1
if isinstance(angleperp,(int,float,numpy.float32,numpy.float64)):
angleperp= numpy.array([angleperp])
t= numpy.array([t])
out= numpy.zeros_like(angleperp)
tindx= t < self._tdisrupt
out[tindx]=\
numpy.exp(-0.5*angleperp[tindx]**2.\
/(t[tindx]**2.*self._sortedSigOEig[eigIndx]+self._sigangle2))/\
numpy.sqrt(t[tindx]**2.*self._sortedSigOEig[eigIndx]+self._sigangle2)\
*self.ptdAngle(t[t < self._tdisrupt],dangle)
return out
################APPROXIMATE FREQUENCY-ANGLE TRANSFORMATION#####################
def _approxaA(self,R,vR,vT,z,vz,phi,interp=True):
"""
NAME:
_approxaA
PURPOSE:
return action-angle coordinates for a point based on the linear
approximation around the stream track
INPUT:
R,vR,vT,z,vz,phi - phase-space coordinates of the given point
interp= (True), if True, use the interpolated track
OUTPUT:
(Or,Op,Oz,ar,ap,az)
HISTORY:
2013-12-03 - Written - Bovy (IAS)
"""
if isinstance(R,(int,float,numpy.float32,numpy.float64)): #Scalar input
R= numpy.array([R])
vR= numpy.array([vR])
vT= numpy.array([vT])
z= numpy.array([z])
vz= numpy.array([vz])
phi= numpy.array([phi])
closestIndx= [self._find_closest_trackpoint(R[ii],vR[ii],vT[ii],
z[ii],vz[ii],phi[ii],
interp=interp,
xy=False)
for ii in range(len(R))]
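        #Linearize around the closest track point: (O,theta) ~ (O,theta)_track + J.(x-x_track),
        #with J the d(O,theta)/d(x,v) Jacobian stored at the nearest non-interpolated track point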
out= numpy.empty((6,len(R)))
for ii in range(len(R)):
dxv= numpy.empty(6)
if interp:
dxv[0]= R[ii]-self._interpolatedObsTrack[closestIndx[ii],0]
dxv[1]= vR[ii]-self._interpolatedObsTrack[closestIndx[ii],1]
dxv[2]= vT[ii]-self._interpolatedObsTrack[closestIndx[ii],2]
dxv[3]= z[ii]-self._interpolatedObsTrack[closestIndx[ii],3]
dxv[4]= vz[ii]-self._interpolatedObsTrack[closestIndx[ii],4]
dxv[5]= phi[ii]-self._interpolatedObsTrack[closestIndx[ii],5]
jacIndx= self._find_closest_trackpoint(R[ii],vR[ii],vT[ii],
z[ii],vz[ii],phi[ii],
interp=False,
xy=False)
else:
dxv[0]= R[ii]-self._ObsTrack[closestIndx[ii],0]
dxv[1]= vR[ii]-self._ObsTrack[closestIndx[ii],1]
dxv[2]= vT[ii]-self._ObsTrack[closestIndx[ii],2]
dxv[3]= z[ii]-self._ObsTrack[closestIndx[ii],3]
dxv[4]= vz[ii]-self._ObsTrack[closestIndx[ii],4]
dxv[5]= phi[ii]-self._ObsTrack[closestIndx[ii],5]
jacIndx= closestIndx[ii]
#Make sure phi hasn't wrapped around
if dxv[5] > numpy.pi:
dxv[5]-= 2.*numpy.pi
elif dxv[5] < -numpy.pi:
dxv[5]+= 2.*numpy.pi
#Apply closest jacobian
out[:,ii]= numpy.dot(self._alljacsTrack[jacIndx,:,:],
dxv)
if interp:
out[:,ii]+= self._interpolatedObsTrackAA[closestIndx[ii]]
else:
out[:,ii]+= self._ObsTrackAA[closestIndx[ii]]
return out
def _approxaAInv(self,Or,Op,Oz,ar,ap,az,interp=True):
"""
NAME:
_approxaAInv
PURPOSE:
return R,vR,... coordinates for a point based on the linear
approximation around the stream track
INPUT:
Or,Op,Oz,ar,ap,az - phase space coordinates in frequency-angle
space
interp= (True), if True, use the interpolated track
OUTPUT:
(R,vR,vT,z,vz,phi)
HISTORY:
2013-12-22 - Written - Bovy (IAS)
"""
if isinstance(Or,(int,float,numpy.float32,numpy.float64)): #Scalar input
Or= numpy.array([Or])
Op= numpy.array([Op])
Oz= numpy.array([Oz])
ar= numpy.array([ar])
ap= numpy.array([ap])
az= numpy.array([az])
#Calculate apar, angle offset along the stream
closestIndx= [self._find_closest_trackpointaA(Or[ii],Op[ii],Oz[ii],
ar[ii],ap[ii],az[ii],
interp=interp)\
for ii in range(len(Or))]
out= numpy.empty((6,len(Or)))
for ii in range(len(Or)):
dOa= numpy.empty(6)
if interp:
dOa[0]= Or[ii]-self._interpolatedObsTrackAA[closestIndx[ii],0]
dOa[1]= Op[ii]-self._interpolatedObsTrackAA[closestIndx[ii],1]
dOa[2]= Oz[ii]-self._interpolatedObsTrackAA[closestIndx[ii],2]
dOa[3]= ar[ii]-self._interpolatedObsTrackAA[closestIndx[ii],3]
dOa[4]= ap[ii]-self._interpolatedObsTrackAA[closestIndx[ii],4]
dOa[5]= az[ii]-self._interpolatedObsTrackAA[closestIndx[ii],5]
jacIndx= self._find_closest_trackpointaA(Or[ii],Op[ii],Oz[ii],
ar[ii],ap[ii],az[ii],
interp=False)
else:
dOa[0]= Or[ii]-self._ObsTrackAA[closestIndx[ii],0]
dOa[1]= Op[ii]-self._ObsTrackAA[closestIndx[ii],1]
dOa[2]= Oz[ii]-self._ObsTrackAA[closestIndx[ii],2]
dOa[3]= ar[ii]-self._ObsTrackAA[closestIndx[ii],3]
dOa[4]= ap[ii]-self._ObsTrackAA[closestIndx[ii],4]
dOa[5]= az[ii]-self._ObsTrackAA[closestIndx[ii],5]
jacIndx= closestIndx[ii]
#Make sure the angles haven't wrapped around
if dOa[3] > numpy.pi:
dOa[3]-= 2.*numpy.pi
elif dOa[3] < -numpy.pi:
dOa[3]+= 2.*numpy.pi
if dOa[4] > numpy.pi:
dOa[4]-= 2.*numpy.pi
elif dOa[4] < -numpy.pi:
dOa[4]+= 2.*numpy.pi
if dOa[5] > numpy.pi:
dOa[5]-= 2.*numpy.pi
elif dOa[5] < -numpy.pi:
dOa[5]+= 2.*numpy.pi
#Apply closest jacobian
out[:,ii]= numpy.dot(self._allinvjacsTrack[jacIndx,:,:],
dOa)
if interp:
out[:,ii]+= self._interpolatedObsTrack[closestIndx[ii]]
else:
out[:,ii]+= self._ObsTrack[closestIndx[ii]]
return out
################################EVALUATE THE DF################################
def __call__(self,*args,**kwargs):
"""
NAME:
__call__
PURPOSE:
evaluate the DF
INPUT:
Either:
a) R,vR,vT,z,vz,phi ndarray [nobjects]
b) (Omegar,Omegaphi,Omegaz,angler,anglephi,anglez) tuple if aAInput
where:
Omegar - radial frequency
Omegaphi - azimuthal frequency
Omegaz - vertical frequency
angler - radial angle
anglephi - azimuthal angle
anglez - vertical angle
c) Orbit instance or list thereof
           log= (True) if True, return the natural log
           aAInput= (False) if True, option b above
OUTPUT:
value of DF
HISTORY:
2013-12-03 - Written - Bovy (IAS)
"""
#First parse log
log= kwargs.pop('log',True)
dOmega, dangle= self.prepData4Call(*args,**kwargs)
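        #The DF factorizes into (i) a Gaussian in the frequency offsets from the progenitor,
        #(ii) a Gaussian in the angle offsets perpendicular to the frequency direction, and
        #(iii) an erf factor from the finite disruption time, plus Jacobian normalization terms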
#Omega part
dOmega4dfOmega= dOmega\
-numpy.tile(self._dsigomeanProg.T,(dOmega.shape[1],1)).T
logdfOmega= -0.5*numpy.sum(dOmega4dfOmega*
numpy.dot(self._sigomatrixinv,
dOmega4dfOmega),
axis=0)-0.5*self._sigomatrixLogdet\
+numpy.log(numpy.fabs(numpy.dot(self._dsigomeanProgDirection,dOmega)))
#Angle part
dangle2= numpy.sum(dangle**2.,axis=0)
dOmega2= numpy.sum(dOmega**2.,axis=0)
dOmegaAngle= numpy.sum(dOmega*dangle,axis=0)
logdfA= -0.5/self._sigangle2*(dangle2-dOmegaAngle**2./dOmega2)\
-2.*self._lnsigangle-0.5*numpy.log(dOmega2)
#Finite stripping part
a0= dOmegaAngle/numpy.sqrt(2.)/self._sigangle/numpy.sqrt(dOmega2)
ad= numpy.sqrt(dOmega2)/numpy.sqrt(2.)/self._sigangle\
*(self._tdisrupt-dOmegaAngle/dOmega2)
        loga= numpy.log((special.erf(a0)+special.erf(ad))/2.) #divided by 2 such that this is 0 for stars well within the stream
out= logdfA+logdfOmega+loga+self._logmeandetdOdJp
if log:
return out
else:
return numpy.exp(out)
def prepData4Call(self,*args,**kwargs):
"""
NAME:
prepData4Call
PURPOSE:
prepare stream data for the __call__ method
INPUT:
__call__ inputs
OUTPUT:
(dOmega,dangle); wrt the progenitor; each [3,nobj]
HISTORY:
2013-12-04 - Written - Bovy (IAS)
"""
#First calculate the actionAngle coordinates if they're not given
#as such
freqsAngles= self._parse_call_args(*args,**kwargs)
dOmega= freqsAngles[:3,:]\
-numpy.tile(self._progenitor_Omega.T,(freqsAngles.shape[1],1)).T
dangle= freqsAngles[3:,:]\
-numpy.tile(self._progenitor_angle.T,(freqsAngles.shape[1],1)).T
#Assuming single wrap, resolve large angle differences (wraps should be marginalized over)
dangle[(dangle < -4.)]+= 2.*numpy.pi
dangle[(dangle > 4.)]-= 2.*numpy.pi
return (dOmega,dangle)
def _parse_call_args(self,*args,**kwargs):
"""Helper function to parse the arguments to the __call__ and related functions,
return [6,nobj] array of frequencies (:3) and angles (3:)"""
interp= kwargs.get('interp',self._useInterp)
if len(args) == 5:
raise IOError("Must specify phi for streamdf")
elif len(args) == 6:
if kwargs.get('aAInput',False):
if isinstance(args[0],(int,float,numpy.float32,numpy.float64)):
out= numpy.empty((6,1))
else:
out= numpy.empty((6,len(args[0])))
for ii in range(6):
out[ii,:]= args[ii]
return out
else:
return self._approxaA(*args,interp=interp)
elif isinstance(args[0],Orbit):
o= args[0]
return self._approxaA(o.R(),o.vR(),o.vT(),o.z(),o.vz(),o.phi(),
interp=interp)
elif isinstance(args[0],list) and isinstance(args[0][0],Orbit):
R, vR, vT, z, vz, phi= [], [], [], [], [], []
for o in args[0]:
R.append(o.R())
vR.append(o.vR())
vT.append(o.vT())
z.append(o.z())
vz.append(o.vz())
phi.append(o.phi())
return self._approxaA(numpy.array(R),numpy.array(vR),
numpy.array(vT),numpy.array(z),
numpy.array(vz),numpy.array(phi),
interp=interp)
def callMarg(self,xy,**kwargs):
"""
NAME:
callMarg
PURPOSE:
           evaluate the DF, marginalizing over some directions, in Galactocentric rectangular coordinates (or in observed l,b,D,vlos,pmll,pmbb coordinates)
INPUT:
xy - phase-space point [X,Y,Z,vX,vY,vZ]; the distribution of the dimensions set to None is returned
interp= (object-wide interp default) if True, use the interpolated stream track
cindx= index of the closest point on the (interpolated) stream track if not given, determined from the dimensions given
nsigma= (3) number of sigma to marginalize the DF over (approximate sigma)
ngl= (5) order of Gauss-Legendre integration
lb= (False) if True, xy contains [l,b,D,vlos,pmll,pmbb] in [deg,deg,kpc,km/s,mas/yr,mas/yr] and the marginalized PDF in these coordinates is returned
Vnorm= (220) circular velocity to normalize with when lb=True
Rnorm= (8) Galactocentric radius to normalize with when lb=True
R0= (8) Galactocentric radius of the Sun (kpc)
Zsun= (0.025) Sun's height above the plane (kpc)
vsun= ([-11.1,241.92,7.25]) Sun's motion in cylindrical coordinates (vR positive away from center)
OUTPUT:
p(xy) marginalized over missing directions in xy
HISTORY:
2013-12-16 - Written - Bovy (IAS)
"""
coordGiven= numpy.array([not x is None for x in xy],dtype='bool')
if numpy.sum(coordGiven) == 6:
raise NotImplementedError("When specifying all coordinates, please use __call__ instead of callMarg")
#First construct the Gaussian approximation at this xy
gaussmean, gaussvar= self.gaussApprox(xy,**kwargs)
cholvar, chollower= stable_cho_factor(gaussvar)
#Now Gauss-legendre integrate over missing directions
ngl= kwargs.get('ngl',5)
nsigma= kwargs.get('nsigma',3)
glx, glw= numpy.polynomial.legendre.leggauss(ngl)
coordEval= []
weightEval= []
jj= 0
baseX= (glx+1)/2.
baseX= list(baseX)
baseX.extend(-(glx+1)/2.)
baseX= numpy.array(baseX)
baseW= glw
baseW= list(baseW)
baseW.extend(glw)
baseW= numpy.array(baseW)
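        #Build a tensor-product Gauss-Legendre grid over the missing dimensions, covering
        #roughly +/- nsigma along the principal (Cholesky) directions of the local Gaussian
        #approximation; dimensions that were given are held fixed at their input values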
for ii in range(6):
if not coordGiven[ii]:
coordEval.append(nsigma*baseX)
weightEval.append(baseW)
jj+= 1
else:
coordEval.append(xy[ii]*numpy.ones(1))
weightEval.append(numpy.ones(1))
mgrid= numpy.meshgrid(*coordEval,indexing='ij')
mgridNotGiven= numpy.array([mgrid[ii].flatten() for ii in range(6)
if not coordGiven[ii]])
mgridNotGiven= numpy.dot(cholvar,mgridNotGiven)
jj= 0
if coordGiven[0]: iX= mgrid[0]
else:
iX= mgridNotGiven[jj]+gaussmean[jj]
jj+= 1
if coordGiven[1]: iY= mgrid[1]
else:
iY= mgridNotGiven[jj]+gaussmean[jj]
jj+= 1
if coordGiven[2]: iZ= mgrid[2]
else:
iZ= mgridNotGiven[jj]+gaussmean[jj]
jj+= 1
if coordGiven[3]: ivX= mgrid[3]
else:
ivX= mgridNotGiven[jj]+gaussmean[jj]
jj+= 1
if coordGiven[4]: ivY= mgrid[4]
else:
ivY= mgridNotGiven[jj]+gaussmean[jj]
jj+= 1
if coordGiven[5]: ivZ= mgrid[5]
else:
ivZ= mgridNotGiven[jj]+gaussmean[jj]
jj+= 1
iXw, iYw, iZw, ivXw, ivYw, ivZw=\
numpy.meshgrid(*weightEval,indexing='ij')
if kwargs.get('lb',False): #Convert to Galactocentric cylindrical coordinates
#Setup coordinate transformation kwargs
Vnorm= kwargs.get('Vnorm',self._Vnorm)
Rnorm= kwargs.get('Rnorm',self._Rnorm)
R0= kwargs.get('R0',self._R0)
Zsun= kwargs.get('Zsun',self._Zsun)
vsun= kwargs.get('vsun',self._vsun)
tXYZ= bovy_coords.lbd_to_XYZ(iX.flatten(),iY.flatten(),
iZ.flatten(),
degree=True)
iR,iphi,iZ= bovy_coords.XYZ_to_galcencyl(tXYZ[:,0],tXYZ[:,1],
tXYZ[:,2],
Xsun=R0,Ysun=0.,Zsun=Zsun)
tvxvyvz= bovy_coords.vrpmllpmbb_to_vxvyvz(ivX.flatten(),
ivY.flatten(),
ivZ.flatten(),
tXYZ[:,0],tXYZ[:,1],
tXYZ[:,2],XYZ=True)
ivR,ivT,ivZ= bovy_coords.vxvyvz_to_galcencyl(tvxvyvz[:,0],
tvxvyvz[:,1],
tvxvyvz[:,2],
iR,iphi,iZ,
galcen=True,
vsun=vsun)
iR/= Rnorm
iZ/= Rnorm
ivR/= Vnorm
ivT/= Vnorm
ivZ/= Vnorm
else:
#Convert to cylindrical coordinates
iR,iphi,iZ=\
bovy_coords.rect_to_cyl(iX.flatten(),iY.flatten(),iZ.flatten())
ivR,ivT,ivZ=\
bovy_coords.rect_to_cyl_vec(ivX.flatten(),ivY.flatten(),
ivZ.flatten(),
iR,iphi,iZ,cyl=True)
#Add the additional Jacobian dXdY/dldb... if necessary
if kwargs.get('lb',False):
#Find the nearest track point
interp= kwargs.get('interp',self._useInterp)
if not 'cindx' in kwargs:
cindx= self._find_closest_trackpointLB(*xy,interp=interp,
usev=True)
else:
cindx= kwargs['cindx']
#Only l,b,d,... to Galactic X,Y,Z,... is necessary because going
#from Galactic to Galactocentric has Jacobian determinant 1
if interp:
addLogDet= self._interpolatedTrackLogDetJacLB[cindx]
else:
addLogDet= self._trackLogDetJacLB[cindx]
else:
addLogDet= 0.
logdf= self(iR,ivR,ivT,iZ,ivZ,iphi,log=True)
return logsumexp(logdf
+numpy.log(iXw.flatten())
+numpy.log(iYw.flatten())
+numpy.log(iZw.flatten())
+numpy.log(ivXw.flatten())
+numpy.log(ivYw.flatten())
+numpy.log(ivZw.flatten()))\
+0.5*numpy.log(numpy.linalg.det(gaussvar))\
+addLogDet
def gaussApprox(self,xy,**kwargs):
"""
NAME:
gaussApprox
PURPOSE:
return the mean and variance of a Gaussian approximation to the stream DF at a given phase-space point in Galactocentric rectangular coordinates (distribution is over missing directions)
INPUT:
xy - phase-space point [X,Y,Z,vX,vY,vZ]; the distribution of the dimensions set to None is returned
interp= (object-wide interp default) if True, use the interpolated stream track
cindx= index of the closest point on the (interpolated) stream track if not given, determined from the dimensions given
lb= (False) if True, xy contains [l,b,D,vlos,pmll,pmbb] in [deg,deg,kpc,km/s,mas/yr,mas/yr] and the Gaussian approximation in these coordinates is returned
OUTPUT:
(mean,variance) of the approximate Gaussian DF for the missing directions in xy
HISTORY:
2013-12-12 - Written - Bovy (IAS)
"""
interp= kwargs.get('interp',self._useInterp)
lb= kwargs.get('lb',False)
#What are we looking for
coordGiven= numpy.array([not x is None for x in xy],dtype='bool')
nGiven= numpy.sum(coordGiven)
#First find the nearest track point
if not 'cindx' in kwargs and lb:
cindx= self._find_closest_trackpointLB(*xy,interp=interp,
usev=True)
elif not 'cindx' in kwargs and not lb:
cindx= self._find_closest_trackpoint(*xy,xy=True,interp=interp,
usev=True)
else:
cindx= kwargs['cindx']
#Get the covariance matrix
if interp and lb:
tcov= self._interpolatedAllErrCovsLBUnscaled[cindx]
tmean= self._interpolatedObsTrackLB[cindx]
elif interp and not lb:
tcov= self._interpolatedAllErrCovsXY[cindx]
tmean= self._interpolatedObsTrackXY[cindx]
elif not interp and lb:
tcov= self._allErrCovsLBUnscaled[cindx]
tmean= self._ObsTrackLB[cindx]
elif not interp and not lb:
tcov= self._allErrCovsXY[cindx]
tmean= self._ObsTrackXY[cindx]
if lb:#Apply scale factors
tcov= copy.copy(tcov)
tcov*= numpy.tile(self._ErrCovsLBScale,(6,1))
tcov*= numpy.tile(self._ErrCovsLBScale,(6,1)).T
#Fancy indexing to recover V22, V11, and V12; V22, V11, V12 as in Appendix B of 0905.2979v1
V11indx0= numpy.array([[ii for jj in range(6-nGiven)] for ii in range(6) if not coordGiven[ii]])
V11indx1= numpy.array([[ii for ii in range(6) if not coordGiven[ii]] for jj in range(6-nGiven)])
V11= tcov[V11indx0,V11indx1]
V22indx0= numpy.array([[ii for jj in range(nGiven)] for ii in range(6) if coordGiven[ii]])
V22indx1= numpy.array([[ii for ii in range(6) if coordGiven[ii]] for jj in range(nGiven)])
V22= tcov[V22indx0,V22indx1]
V12indx0= numpy.array([[ii for jj in range(nGiven)] for ii in range(6) if not coordGiven[ii]])
V12indx1= numpy.array([[ii for ii in range(6) if coordGiven[ii]] for jj in range(6-nGiven)])
V12= tcov[V12indx0,V12indx1]
#Also get m1 and m2, again following Appendix B of 0905.2979v1
        m1= tmean[~coordGiven]
m2= tmean[coordGiven]
#conditional mean and variance
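        #Standard Gaussian conditioning: mean= m1 + V12 V22^-1 (v2-m2),
        #variance= V11 - V12 V22^-1 V12^T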
V22inv= numpy.linalg.inv(V22)
v2= numpy.array([xy[ii] for ii in range(6) if coordGiven[ii]])
condMean= m1+numpy.dot(V12,numpy.dot(V22inv,v2-m2))
condVar= V11-numpy.dot(V12,numpy.dot(V22inv,V12.T))
return (condMean,condVar)
################################SAMPLE THE DF##################################
def sample(self,n,returnaAdt=False,returndt=False,interp=None,
xy=False,lb=False,
Vnorm=None,Rnorm=None,
R0=None,Zsun=None,vsun=None):
"""
NAME:
sample
PURPOSE:
sample from the DF
INPUT:
n - number of points to return
returnaAdt= (False) if True, return (Omega,angle,dt)
           returndt= (False) if True, also return the time since the star was stripped
interp= (object-wide default) use interpolation of the stream track
xy= (False) if True, return Galactocentric rectangular coordinates
lb= (False) if True, return Galactic l,b,d,vlos,pmll,pmbb coordinates
+Coordinate transformation inputs (all default to the instance-wide
values):
Vnorm= circular velocity to normalize velocities with
Rnorm= Galactocentric radius to normalize positions with
R0= Galactocentric radius of the Sun (kpc)
Zsun= Sun's height above the plane (kpc)
vsun= Sun's motion in cylindrical coordinates (vR positive away from center)
OUTPUT:
(R,vR,vT,z,vz,phi) of points on the stream in 6,N array
HISTORY:
2013-12-22 - Written - Bovy (IAS)
"""
if interp is None:
interp= self._useInterp
#First sample frequencies
#Sample frequency along largest eigenvalue using ARS
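        #Adaptive-rejection sampling of p(dO) ~ dO exp(-(dO-meandO)^2/(2 sigma^2)) along
        #the direction of the largest frequency-dispersion eigenvalue (see _h_ars/_hp_ars)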
dO1s=\
bovy_ars.bovy_ars([0.,0.],[True,False],
[self._meandO-numpy.sqrt(self._sortedSigOEig[2]),
self._meandO+numpy.sqrt(self._sortedSigOEig[2])],
_h_ars,_hp_ars,nsamples=n,
hxparams=(self._meandO,self._sortedSigOEig[2]),
maxn=100)
dO1s= numpy.array(dO1s)*self._sigMeanSign
dO2s= numpy.random.normal(size=n)*numpy.sqrt(self._sortedSigOEig[1])
dO3s= numpy.random.normal(size=n)*numpy.sqrt(self._sortedSigOEig[0])
#Rotate into dOs in R,phi,z coordinates
dO= numpy.vstack((dO3s,dO2s,dO1s))
dO= numpy.dot(self._sigomatrixEig[1][:,self._sigomatrixEigsortIndx],
dO)
Om= dO+numpy.tile(self._progenitor_Omega.T,(n,1)).T
#Also generate angles
da= numpy.random.normal(size=(3,n))*self._sigangle
#And a random time
dt= numpy.random.uniform(size=n)*self._tdisrupt
#Integrate the orbits relative to the progenitor
da+= dO*numpy.tile(dt,(3,1))
angle= da+numpy.tile(self._progenitor_angle.T,(n,1)).T
if returnaAdt:
return (Om,angle,dt)
#Propagate to R,vR,etc.
RvR= self._approxaAInv(Om[0,:],Om[1,:],Om[2,:],
angle[0,:],angle[1,:],angle[2,:],
interp=interp)
if returndt and not xy and not lb:
return (RvR,dt)
elif not xy and not lb:
return RvR
if xy:
sX= RvR[0]*numpy.cos(RvR[5])
sY= RvR[0]*numpy.sin(RvR[5])
sZ= RvR[3]
svX, svY, svZ=\
bovy_coords.cyl_to_rect_vec(RvR[1],
RvR[2],
RvR[4],
RvR[5])
out= numpy.empty((6,n))
out[0]= sX
out[1]= sY
out[2]= sZ
out[3]= svX
out[4]= svY
out[5]= svZ
if returndt:
return (out,dt)
else:
return out
if lb:
if Vnorm is None:
Vnorm= self._Vnorm
if Rnorm is None:
Rnorm= self._Rnorm
if R0 is None:
R0= self._R0
if Zsun is None:
Zsun= self._Zsun
if vsun is None:
vsun= self._vsun
XYZ= bovy_coords.galcencyl_to_XYZ(RvR[0]*Rnorm,
RvR[5],
RvR[3]*Rnorm,
Xsun=R0,Zsun=Zsun)
vXYZ= bovy_coords.galcencyl_to_vxvyvz(RvR[1]*Vnorm,
RvR[2]*Vnorm,
RvR[4]*Vnorm,
RvR[5],
vsun=vsun)
slbd=bovy_coords.XYZ_to_lbd(XYZ[0],XYZ[1],XYZ[2],
degree=True)
svlbd= bovy_coords.vxvyvz_to_vrpmllpmbb(vXYZ[0],vXYZ[1],vXYZ[2],
slbd[:,0],slbd[:,1],
slbd[:,2],
degree=True)
out= numpy.empty((6,n))
out[0]= slbd[:,0]
out[1]= slbd[:,1]
out[2]= slbd[:,2]
out[3]= svlbd[:,0]
out[4]= svlbd[:,1]
out[5]= svlbd[:,2]
if returndt:
return (out,dt)
else:
return out
def _h_ars(x,params):
"""ln p(Omega) for ARS"""
mO, sO2= params
return -0.5*(x-mO)**2./sO2+numpy.log(x)
def _hp_ars(x,params):
"""d ln p(Omega) / d Omega for ARS"""
mO, sO2= params
return -(x-mO)/sO2+1./x
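# Quick numeric check of the ARS target above (an illustrative sketch, not part
# of the galpy API): for params = (mO, sO2) = (1.0, 0.25) the Gaussian term
# vanishes at x = 1.0, so
#   _h_ars(1.0, (1.0, 0.25))  -> -0.5*0.**2/0.25 + numpy.log(1.0) = 0.0
#   _hp_ars(1.0, (1.0, 0.25)) -> -(1.0-1.0)/0.25 + 1./1.0 = 1.0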
def _determine_stream_track_single(aA,progenitorTrack,trackt,
progenitor_angle,sigMeanSign,
dsigomeanProgDirection,meanOmega,
thetasTrack):
#Setup output
allAcfsTrack= numpy.empty((9))
alljacsTrack= numpy.empty((6,6))
allinvjacsTrack= numpy.empty((6,6))
ObsTrack= numpy.empty((6))
ObsTrackAA= numpy.empty((6))
detdOdJ= numpy.empty(6)
#Calculate
tacfs= aA.actionsFreqsAngles(progenitorTrack(trackt),
maxn=3)
allAcfsTrack[0]= tacfs[0][0]
allAcfsTrack[1]= tacfs[1][0]
allAcfsTrack[2]= tacfs[2][0]
for jj in range(3,9):
allAcfsTrack[jj]= tacfs[jj]
tjac= calcaAJac(progenitorTrack(trackt)._orb.vxvv,
aA,
dxv=None,actionsFreqsAngles=True,
lb=False,
_initacfs=tacfs)
alljacsTrack[:,:]= tjac[3:,:]
tinvjac= numpy.linalg.inv(tjac[3:,:])
allinvjacsTrack[:,:]= tinvjac
#Also store detdOdJ
jindx= numpy.array([True,True,True,False,False,False,True,True,True],
dtype='bool')
dOdJ= numpy.dot(tjac[3:,:],numpy.linalg.inv(tjac[jindx,:]))[0:3,0:3]
detdOdJ= numpy.linalg.det(dOdJ)
theseAngles= numpy.mod(progenitor_angle\
+thetasTrack\
*sigMeanSign\
*dsigomeanProgDirection,
2.*numpy.pi)
ObsTrackAA[3:]= theseAngles
diffAngles= theseAngles-allAcfsTrack[6:]
diffAngles[(diffAngles > numpy.pi)]= diffAngles[(diffAngles > numpy.pi)]-2.*numpy.pi
diffAngles[(diffAngles < -numpy.pi)]= diffAngles[(diffAngles < -numpy.pi)]+2.*numpy.pi
thisFreq= meanOmega(thetasTrack)
ObsTrackAA[:3]= thisFreq
diffFreqs= thisFreq-allAcfsTrack[3:6]
ObsTrack[:]= numpy.dot(tinvjac,
numpy.hstack((diffFreqs,diffAngles)))
ObsTrack[0]+= \
progenitorTrack(trackt).R()
ObsTrack[1]+= \
progenitorTrack(trackt).vR()
ObsTrack[2]+= \
progenitorTrack(trackt).vT()
ObsTrack[3]+= \
progenitorTrack(trackt).z()
ObsTrack[4]+= \
progenitorTrack(trackt).vz()
ObsTrack[5]+= \
progenitorTrack(trackt).phi()
return [allAcfsTrack,alljacsTrack,allinvjacsTrack,ObsTrack,ObsTrackAA,
detdOdJ]
def _determine_stream_spread_single(sigomatrixEig,
thetasTrack,
sigOmega,
sigAngle,
allinvjacsTrack):
"""sigAngle input may either be a function that returns the dispersion in
perpendicular angle as a function of parallel angle, or a value"""
#Estimate the spread in all frequencies and angles
sigObig2= sigOmega(thetasTrack)**2.
tsigOdiag= copy.copy(sigomatrixEig[0])
tsigOdiag[numpy.argmax(tsigOdiag)]= sigObig2
tsigO= numpy.dot(sigomatrixEig[1],
numpy.dot(numpy.diag(tsigOdiag),
numpy.linalg.inv(sigomatrixEig[1])))
#angles
if hasattr(sigAngle,'__call__'):
sigangle2= sigAngle(thetasTrack)**2.
else:
sigangle2= sigAngle**2.
tsigadiag= numpy.ones(3)*sigangle2
tsigadiag[numpy.argmax(tsigOdiag)]= 1.
tsiga= numpy.dot(sigomatrixEig[1],
numpy.dot(numpy.diag(tsigadiag),
numpy.linalg.inv(sigomatrixEig[1])))
#correlations, assume half correlated for now (can be calculated)
correlations= numpy.diag(0.5*numpy.ones(3))*numpy.sqrt(tsigOdiag*tsigadiag)
correlations[numpy.argmax(tsigOdiag),numpy.argmax(tsigOdiag)]= 0.
correlations= numpy.dot(sigomatrixEig[1],
numpy.dot(correlations,
numpy.linalg.inv(sigomatrixEig[1])))
#Now convert
fullMatrix= numpy.empty((6,6))
fullMatrix[:3,:3]= tsigO
fullMatrix[3:,3:]= tsiga
fullMatrix[3:,:3]= correlations
fullMatrix[:3,3:]= correlations.T
return numpy.dot(allinvjacsTrack,numpy.dot(fullMatrix,allinvjacsTrack.T))
def calcaAJac(xv,aA,dxv=None,freqs=False,dOdJ=False,actionsFreqsAngles=False,
lb=False,coordFunc=None,
Vnorm=220.,Rnorm=8.,R0=8.,Zsun=0.025,vsun=[-11.1,8.*30.24,7.25],
_initacfs=None):
"""
NAME:
calcaAJac
PURPOSE:
calculate the Jacobian d(J,theta)/d(x,v)
INPUT:
xv - phase-space point: Either
1) [R,vR,vT,z,vz,phi]
2) [l,b,D,vlos,pmll,pmbb] (if lb=True, see below)
3) list/array of 6 numbers that can be transformed into (normalized) R,vR,vT,z,vz,phi using coordFunc
aA - actionAngle instance
dxv - infinitesimal to use (rescaled for lb, so think fractionally)
freqs= (False) if True, go to frequencies rather than actions
dOdJ= (False), actually calculate d Frequency / d action
actionsFreqsAngles= (False) if True, calculate d(action,freq.,angle)/d (xv)
lb= (False) if True, start with (l,b,D,vlos,pmll,pmbb) in (deg,deg,kpc,km/s,mas/yr,mas/yr)
Vnorm= (220) circular velocity to normalize with when lb=True
Rnorm= (8) Galactocentric radius to normalize with when lb=True
R0= (8) Galactocentric radius of the Sun (kpc)
Zsun= (0.025) Sun's height above the plane (kpc)
vsun= ([-11.1,241.92,7.25]) Sun's motion in cylindrical coordinates (vR positive away from center)
coordFunc= (None) if set, this is a function that takes xv and returns R,vR,vT,z,vz,phi in normalized units (units where vc=1 at r=1 if the potential is normalized that way, for example)
OUTPUT:
Jacobian matrix
HISTORY:
2013-11-25 - Written - Bovy (IAS)
"""
if lb:
coordFunc= lambda x: lbCoordFunc(xv,Vnorm,Rnorm,R0,Zsun,vsun)
if not coordFunc is None:
R, vR, vT, z, vz, phi= coordFunc(xv)
else:
R, vR, vT, z, vz, phi= xv[0],xv[1],xv[2],xv[3],xv[4],xv[5]
if dxv is None:
dxv= 10.**-8.*numpy.ones(6)
if lb:
#Re-scale some of the differences, to be more natural
dxv[0]*= 180./numpy.pi
dxv[1]*= 180./numpy.pi
dxv[2]*= Rnorm
dxv[3]*= Vnorm
dxv[4]*= Vnorm/4.74047/xv[2]
dxv[5]*= Vnorm/4.74047/xv[2]
if actionsFreqsAngles:
jac= numpy.zeros((9,6))
else:
jac= numpy.zeros((6,6))
if dOdJ:
jac2= numpy.zeros((6,6))
if _initacfs is None:
jr,lz,jz,Or,Ophi,Oz,ar,aphi,az\
= aA.actionsFreqsAngles(R,vR,vT,z,vz,phi,maxn=3)
else:
jr,lz,jz,Or,Ophi,Oz,ar,aphi,az\
= _initacfs
for ii in range(6):
temp= xv[ii]+dxv[ii] #Trick to make sure dxv is representable
dxv[ii]= temp-xv[ii]
xv[ii]+= dxv[ii]
if not coordFunc is None:
tR, tvR, tvT, tz, tvz, tphi= coordFunc(xv)
else:
tR, tvR, tvT, tz, tvz, tphi= xv[0],xv[1],xv[2],xv[3],xv[4],xv[5]
tjr,tlz,tjz,tOr,tOphi,tOz,tar,taphi,taz\
= aA.actionsFreqsAngles(tR,tvR,tvT,tz,tvz,tphi,maxn=3)
xv[ii]-= dxv[ii]
angleIndx= 3
if actionsFreqsAngles:
jac[0,ii]= (tjr-jr)/dxv[ii]
jac[1,ii]= (tlz-lz)/dxv[ii]
jac[2,ii]= (tjz-jz)/dxv[ii]
jac[3,ii]= (tOr-Or)/dxv[ii]
jac[4,ii]= (tOphi-Ophi)/dxv[ii]
jac[5,ii]= (tOz-Oz)/dxv[ii]
angleIndx= 6
elif freqs:
jac[0,ii]= (tOr-Or)/dxv[ii]
jac[1,ii]= (tOphi-Ophi)/dxv[ii]
jac[2,ii]= (tOz-Oz)/dxv[ii]
else:
jac[0,ii]= (tjr-jr)/dxv[ii]
jac[1,ii]= (tlz-lz)/dxv[ii]
jac[2,ii]= (tjz-jz)/dxv[ii]
if dOdJ:
jac2[0,ii]= (tOr-Or)/dxv[ii]
jac2[1,ii]= (tOphi-Ophi)/dxv[ii]
jac2[2,ii]= (tOz-Oz)/dxv[ii]
#For the angles, make sure we do not hit a turning point
if tar-ar > numpy.pi:
jac[angleIndx,ii]= (tar-ar-2.*numpy.pi)/dxv[ii]
elif tar-ar < -numpy.pi:
jac[angleIndx,ii]= (tar-ar+2.*numpy.pi)/dxv[ii]
else:
jac[angleIndx,ii]= (tar-ar)/dxv[ii]
if taphi-aphi > numpy.pi:
jac[angleIndx+1,ii]= (taphi-aphi-2.*numpy.pi)/dxv[ii]
elif taphi-aphi < -numpy.pi:
jac[angleIndx+1,ii]= (taphi-aphi+2.*numpy.pi)/dxv[ii]
else:
jac[angleIndx+1,ii]= (taphi-aphi)/dxv[ii]
if taz-az > numpy.pi:
jac[angleIndx+2,ii]= (taz-az-2.*numpy.pi)/dxv[ii]
elif taz-az < -numpy.pi:
jac[angleIndx+2,ii]= (taz-az+2.*numpy.pi)/dxv[ii]
else:
jac[angleIndx+2,ii]= (taz-az)/dxv[ii]
if dOdJ:
jac2[3,:]= jac[3,:]
jac2[4,:]= jac[4,:]
jac2[5,:]= jac[5,:]
jac= numpy.dot(jac2,numpy.linalg.inv(jac))[0:3,0:3]
return jac
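# Example usage (a hedged sketch; ``aAI`` stands for any actionAngle instance,
# e.g. an actionAngleIsochroneApprox object, and the phase-space point is made up):
#   xv = [1., 0.1, 1.1, 0.1, 0., 0.]   # (R,vR,vT,z,vz,phi) in natural units
#   jac = calcaAJac(xv, aAI, actionsFreqsAngles=True)
#   # jac then has shape (9,6): d(Jr,Lz,Jz,Or,Ophi,Oz,ar,aphi,az)/d(xv)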
def lbCoordFunc(xv,Vnorm,Rnorm,R0,Zsun,vsun):
#Input is (l,b,D,vlos,pmll,pmbb) in (deg,deg,kpc,km/s,mas/yr,mas/yr)
X,Y,Z= bovy_coords.lbd_to_XYZ(xv[0],xv[1],xv[2],degree=True)
R,phi,Z= bovy_coords.XYZ_to_galcencyl(X,Y,Z,
Xsun=R0,Ysun=0.,Zsun=Zsun)
vx,vy,vz= bovy_coords.vrpmllpmbb_to_vxvyvz(xv[3],xv[4],xv[5],
X,Y,Z,XYZ=True)
vR,vT,vZ= bovy_coords.vxvyvz_to_galcencyl(vx,vy,vz,R,phi,Z,galcen=True,
vsun=vsun)
R/= Rnorm
Z/= Rnorm
vR/= Vnorm
vT/= Vnorm
vZ/= Vnorm
return (R,vR,vT,Z,vZ,phi)
|
followthesheep/galpy
|
galpy/df_src/streamdf.py
|
Python
|
bsd-3-clause
| 117,381
|
[
"Gaussian"
] |
b420806e285f6c3cd071744aecf57f020d46e4cfb4703fb37ccc48867799f233
|
""" TokenAgent
This agent inspect all elements, and resets their tokens if necessary.
The following options can be set for the TokenAgent.
.. literalinclude:: ../ConfigTemplate.cfg
:start-after: ##BEGIN TokenAgent
:end-before: ##END
:dedent: 2
:caption: TokenAgent options
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = '$Id$'
from datetime import datetime, timedelta
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
AGENT_NAME = 'ResourceStatus/TokenAgent'
class TokenAgent(AgentModule):
"""
TokenAgent is in charge of checking tokens assigned on resources.
Notifications are sent to those users owning expiring tokens.
"""
# Rss token
__rssToken = 'rs_svc'
def __init__(self, *args, **kwargs):
""" c'tor
"""
AgentModule.__init__(self, *args, **kwargs)
self.notifyHours = 12
self.adminMail = ''
self.rsClient = None
self.tokenDict = {}
self.diracAdmin = None
def initialize(self):
""" TokenAgent initialization
"""
self.notifyHours = self.am_getOption('notifyHours', self.notifyHours)
self.adminMail = self.am_getOption('adminMail', self.adminMail)
self.rsClient = ResourceStatusClient()
self.diracAdmin = DiracAdmin()
return S_OK()
def execute(self):
"""
Looks for user tokens. If they are expired, or expiring, it notifies users.
"""
# Initialized here, as it is needed empty at the beginning of the execution
self.tokenDict = {}
elements = ('Site', 'Resource', 'Node')
for element in elements:
self.log.info('Processing %s' % element)
interestingTokens = self._getInterestingTokens(element)
if not interestingTokens['OK']:
self.log.error(interestingTokens['Message'])
continue
interestingTokens = interestingTokens['Value']
processTokens = self._processTokens(element, interestingTokens)
if not processTokens['OK']:
self.log.error(processTokens['Message'])
continue
notificationResult = self._notifyOfTokens()
if not notificationResult['OK']:
self.log.error(notificationResult['Message'])
return S_OK()
def _getInterestingTokens(self, element):
"""
Given an element, picks all the entries whose TokenExpiration is earlier than now + notifyHours.
If the TokenOwner is not the RSS token ( rs_svc ), the entry is selected.
"""
tokenExpLimit = datetime.utcnow() + timedelta(hours=self.notifyHours)
tokenElements = self.rsClient.selectStatusElement(
element, 'Status',
meta={'older': ['TokenExpiration', tokenExpLimit]})
if not tokenElements['OK']:
return tokenElements
tokenColumns = tokenElements['Columns']
tokenElements = tokenElements['Value']
interestingTokens = []
for tokenElement in tokenElements:
tokenElement = dict(zip(tokenColumns, tokenElement))
if tokenElement['TokenOwner'] != self.__rssToken:
interestingTokens.append(tokenElement)
return S_OK(interestingTokens)
def _processTokens(self, element, tokenElements):
"""
Given an element and a list of interesting token elements, updates the
database if a token has expired, logs a message and adds the token to the
per-owner dictionary used later for the notifications.
"""
never = datetime.max
for tokenElement in tokenElements:
try:
name = tokenElement['Name']
statusType = tokenElement['StatusType']
status = tokenElement['Status']
tokenOwner = tokenElement['TokenOwner']
tokenExpiration = tokenElement['TokenExpiration']
except KeyError as e:
return S_ERROR(e)
# If token has already expired
if tokenExpiration < datetime.utcnow():
_msg = '%s with statusType "%s" and owner %s EXPIRED'
self.log.info(_msg % (name, statusType, tokenOwner))
result = self.rsClient.addOrModifyStatusElement(element, 'Status', name=name,
statusType=statusType,
tokenOwner=self.__rssToken,
tokenExpiration=never)
if not result['OK']:
return result
else:
_msg = '%s with statusType "%s" and owner %s -> %s'
self.log.info(_msg % (name, statusType, tokenOwner, tokenExpiration))
if tokenOwner not in self.tokenDict:
self.tokenDict[tokenOwner] = []
self.tokenDict[tokenOwner].append([tokenOwner, element, name, statusType, status, tokenExpiration])
return S_OK()
def _notifyOfTokens(self):
"""
Splits the interesting tokens into expired and expiring, grouped per owner,
and then sends notifications to the corresponding users.
"""
now = datetime.utcnow()
adminExpired = []
adminExpiring = []
for tokenOwner, tokenLists in self.tokenDict.items():
expired = []
expiring = []
for tokenList in tokenLists:
if tokenList[5] < now:
expired.append(tokenList)
adminExpired.append(tokenList)
else:
expiring.append(tokenList)
adminExpiring.append(tokenList)
resNotify = self._notify(tokenOwner, expired, expiring)
if not resNotify['OK']:
self.log.error('Failed to notify token owner', resNotify['Message'])
if (adminExpired or adminExpiring) and self.adminMail:
return self._notify(self.adminMail, adminExpired, adminExpiring)
return S_OK()
def _notify(self, tokenOwner, expired, expiring):
"""
Given a token owner and a list of expired and expiring tokens, sends an
email to the user.
"""
subject = 'RSS token summary for tokenOwner %s' % tokenOwner
mail = '\nEXPIRED tokens ( RSS has taken control of them )\n'
for tokenList in expired:
mail += ' '.join([str(x) for x in tokenList])
mail += '\n'
mail += '\nEXPIRING tokens ( RSS will take control of them )\n'
for tokenList in expiring:
mail += ' '.join([str(x) for x in tokenList])
mail += '\n'
mail += "\n\n You can extend for another 24 hours using the web interface (Set token -> Acquire)\n"
mail += " Or you can use the dirac-rss-set-token script\n\n"
mail += "Through the same interfaces you can release the token any time\n"
resEmail = self.diracAdmin.sendMail(tokenOwner, subject, mail)
if not resEmail['OK']:
return S_ERROR('Cannot send email to user "%s"' % tokenOwner)
return resEmail
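# Illustrative sketch of the expiration rule used above (not part of DIRAC):
# a token counts as "expired" when its TokenExpiration lies in the past, e.g.
#   expired = tokenExpiration < datetime.utcnow()
# Expired tokens are handed back to the RSS owner ( rs_svc ) with their
# TokenExpiration pushed to datetime.max; expiring ones only trigger an email.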
|
yujikato/DIRAC
|
src/DIRAC/ResourceStatusSystem/Agent/TokenAgent.py
|
Python
|
gpl-3.0
| 6,701
|
[
"DIRAC"
] |
5cc6fbd3f405a349f9e226a2c7cfaa625a7527d58de0e8830efdd0c7c68a1688
|
#process all records
#for each record, find a spike template for each channel
#then compute the signal to noise ratio (snr) for each cluster
import pickle
import signal_processing as sig_proc
import numpy as np
import copy
import matplotlib.pyplot as plt
dir_name = '../data/r415/'
img_ext = '.eps'
save_img = True
show = False
save_obj = False
#signal filtering parameter
low_cut = 3e2
high_cut = 3e3
#spike finding parameters
b_spike = 6
a_spike = 20
spike_thresh = -4
#kohonen parameter
koho_col = 4
koho_row = 8
weight_count = a_spike + b_spike
max_weight = spike_thresh * 2
alpha = 0.1 #learning coef
neighbor = 4 #number of neighbor to modified
min_win = 2 #number of times a neuron should win to be considered a good neuron
dist_thresh = 5 #distance from which it's acceptable to create a new class
#cluster parameter
min_clus_abs = 20 #minimum cluster size (absolute value)
min_clus_rel = 0.01 #minimum cluster size (relative value)
threshold_template = 4 #distance from which it's acceptable to put spike in class
base_name = 'r415_'
record_name = ['130926', '131008', '131009', '131011', '131016', '131017', '131018', '131021', '131023', '131025',
'131030', '131101', '131118', '131129']
record_data = {}
with open(dir_name + 'templates', 'rb') as my_file:
all_chan_templates = pickle.load(my_file)
sp = sig_proc.Signal_processing(save_img, show, img_ext)
global_snr = []
global_cell_count=[]
for record in record_name:
# record = record_name[0]
print('----- processing record: ' + record + ' -----')
signal = sp.load_m(dir_name + base_name + record + '.mat', 'd')#load multichannel signal
fs = float(sp.load_m(dir_name + 'fech.mat', 'sampFreq')) #load sample frequency
fsignal = sp.signal_mc_filtering(signal, low_cut, high_cut, fs)
signal_noise_ratio_r415 = []
cell_count=0
for chan in range(fsignal.shape[0]):
print('\n\n--- processing chan : ' + str(chan + 1) + ' ---')
s = fsignal[chan]
#signal dispersion
sig_mean = np.array(fsignal[chan]).mean()
sig_std = np.array(fsignal[chan]).std()
min_sig = sig_mean-2*sig_std
max_sig = sig_mean+2*sig_std
#find spike using threshold and smooth them
spikes_values, spikes_time = sp.find_spikes(s, a_spike, b_spike, spike_thresh)
print('spikes found: ' + str(spikes_values.shape[0]))
spikes_values = sp.smooth_spikes(spikes_values, 3)
#find template for spikes
koho = sp.find_spike_template_kohonen(spikes_values, koho_col, koho_row, weight_count, max_weight, alpha, neighbor,
min_win, dist_thresh)
#keep best cluster aka groups
min_clus = max(min_clus_abs, min_clus_rel * spikes_values.shape[0])
koho.evaluate_group(spikes_values, 2 * dist_thresh, min_clus)#keep only groups that have more spikes than min_clus
cell_count += len(koho.groups)
for group in koho.groups:
if np.array(group.spikes).shape[0] > 0:
max_spike = np.array(group.spikes).max(1).mean()
min_spike = np.array(group.spikes).min(1).mean()
signal_noise_ratio_r415.append((max_spike-min_spike)/(max_sig-min_sig))
else:
signal_noise_ratio_r415.append(0)
global_cell_count.append(cell_count)
global_snr.append(copy.copy(signal_noise_ratio_r415))
box_plot=[]
plt.figure()
plt.boxplot(global_snr)
#compute mean snr for each experiment and plot
snr_mean = []
for l in global_snr:
snr_mean.append(np.array(l).mean())
plt.plot(snr_mean)
if save_img:
plt.savefig('box_plot_snr_r415_new'+img_ext, bbox_inches='tight')
if show:
plt.show()
else:
plt.close()
plt.figure()
plt.plot(global_cell_count)
plt.hlines(32, 0, len(record_name))
plt.hlines(64, 0, len(record_name))
if save_img:
plt.savefig('neuron_evo_r415_new'+img_ext, bbox_inches='tight')
if show:
plt.show()
else:
plt.close()
print('\n\n#################')
print('#### END ####')
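# Worked example of the SNR definition used above (illustrative numbers only):
# if a cluster's mean spike peak-to-trough amplitude is max_spike - min_spike = 120
# and the filtered signal spans mean +/- 2*std so that max_sig - min_sig = 30,
# then snr = 120. / 30. = 4.0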
|
scauglog/brain_record_toolbox
|
script_r415_snr_evo_with_new_pattern.py
|
Python
|
mit
| 4,035
|
[
"NEURON"
] |
4698af917abf929b963d64981f1a620b92a7d6618409f6df2deabb72391751fa
|
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import vtk
from .. import base
from .. import utils
class AxisSource(base.ChiggerFilterSourceBase):
"""
Creates a Axis source for use with the ColorBar.
"""
VTKACTOR_TYPE = vtk.vtkContextActor
@staticmethod
def getOptions():
opt = base.ChiggerFilterSourceBase.getOptions()
opt += utils.AxisOptions.get_options()
return opt
def __init__(self, **kwargs):
super(AxisSource, self).__init__(vtkactor_type=vtk.vtkContextActor, vtkmapper_type=None,
**kwargs)
self._vtksource = vtk.vtkAxis()
self._vtkactor.GetScene().AddItem(self._vtksource)
def getVTKSource(self):
"""
Return the vtkAxis object.
"""
return self._vtksource
def update(self, **kwargs):
"""
Update the vtkAxis with given settings. (override)
Inputs:
see ChiggerFilterSourceBase
"""
super(AxisSource, self).update(**kwargs)
utils.AxisOptions.set_options(self._vtksource, self._options)
self._vtksource.Update()
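# Minimal usage sketch (an illustration only; option names and values come from
# utils.AxisOptions and are not spelled out here):
#   src = AxisSource()
#   src.update()                   # pushes the current options onto the vtkAxis
#   vtk_axis = src.getVTKSource()  # the underlying vtk.vtkAxis object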
|
nuclear-wizard/moose
|
python/chigger/misc/AxisSource.py
|
Python
|
lgpl-2.1
| 1,445
|
[
"MOOSE",
"VTK"
] |
9ed3d59fd3814c132ef500fbdee5c0dd805eca4a9992d7ed59762c040593d7a4
|
# physcon Physical constants
# Note: type physcon.help() (after import physcon)
from math import pi
# define light velocity, because we need it in calculations
cloc =299792458.
# dictionary of physical constants in SI units. Values CODATA 2010 http://www.physics.nist.gov/cuu/Constants/
# each item: [description (string), symbol (string), value (float), sd (float), relat. sd (float),
# value(sd) unit (string), source (string)]
all={'lightvel':['velocity of light in vacuum','c',cloc,0., 0.,'299 792 458(ex) m/s', 'CODATA 2010'],
'planck':["Planck's constant",'h',6.62606957e-34,2.9e-41,4.4e-8,'6.626 069 57(29) e-34 J s', 'CODATA 2010'],
'dirac':["Dirac's constant = h/(2 pi)",'hbar',1.054571726e-34,4.7e-42,4.4e-8,'1.054 571 726(47) e-34 J s', 'CODATA 2010 '],
'magn-const':['magnetic permeability of vacuum','mu_0',4.e-7*pi,0.,0.,'1.256 637 061... e-6 N A^-2',''],
'elec-const':['dielectric permittivity of vacuum','eps_0',1.e7/(4*pi*cloc*cloc),0.,0.,'8.854 187 817... e-12 F/m',''],
'gravit':['Newton constant of gravitation','G',6.67384e-11, 8.0e-15,1.2e-4,'6.673 84(80) e-11 m^3 kg^-1 s^-2','CODATA 2010'],
'charge-e':['elementary charge','e',1.602176565e-19,3.5e-27,2.2e-8,'1.602 176 565(35) e-19 C','CODATA 2010'],
'mass-e':['electron mass','m_e',9.10938291e-31,4.0e-38,4.4e-8,'9.109 382 91(40) e-31 kg','CODATA 2010'],
'mass-e/u':['electron mass in u','m_e_u',5.4857990946e-4,2.2e-13,4.0e-10,'5.485 799 0946(22) u','CODATA 2010'],
'mass-p':['proton mass','m_p',1.672621777e-27,7.4e-35,4.4e-8,'1.672 621 777(74) e-27 kg','CODATA 2010'],
'mass-p/u':['proton mass in u','m_p_u',1.007276466812,9.0e-11,8.9e-11,'1.007 276 466 812(90) u','CODATA 2010'],
'mass-n':['neutron mass','m_n',1.674927351e-27,7.4e-35,4.4e-8,'1.674 927 351(74) e-27 kg','CODATA 2010'],
'mass-n/u':['neutron mass in u','m_n_u',1.00866491600,4.3e-10,4.2e-10,'1.008 664 916 00(43) u','CODATA 2010'],
'mass-d':['deuteron mass','m_d',3.34358348e-27,1.5e-34,4.4e-8,'3.343 583 48(15) e-27 kg','CODATA 2010'],
'mass-d/u':['deuteron mass in u','m_d_u',2.013553212712,7.7e-11,3.8e-11,'2.013 553 212 712(77) u','CODATA 2010'],
'mass-mu':['muon mass','m_m',1.883531475e-28,9.6e-36,5.1e-8,'1.883 531 475(96) e-28 kg','CODATA 2010'],
'mass-mu/u':['muon mass in u','m_m_u',0.1134289267,2.9e-9,2.5e-8,'0.113 428 9267(29) u','CODATA 2010'],
'ratio-me/mp':['electron/proton mass ratio','ratio_memp',5.4461702178e-4,2.2e-13,4.1e-10,'5.446 170 2178(22) e-4','CODATA 2010'],
'ratio-mp/me':['proton/electron mass ratio','ratio_mpme',1836.15267245,7.5e-7,4.1e-10,'1836.152 672 45(75)','CODATA 2010'],
'amu':['unified atomic mass unit = 1/12 m(12C)','u',1.660538921e-27,7.3e-35,4.4e-8,'1.660 538 921(73) e-27 kg','CODATA 2010'],
'avogadro':['Avogadro constant','N_A',6.02214129e23,2.7e16,4.4e-8,'6.022 141 29(27) e23 mol^-1','CODATA 2010'],
'boltzmann':['Boltzmann constant','k_B',1.3806488e-23,1.3e-29,9.1e-7,'1.380 6488(13) e-23 J/K','CODATA 2010'],
'gas':['molar gas constant = N_A k_B','R',8.3144621,7.5e-6,9.1e-7,'8.314 4621(75) J mol^-1 K^-1','CODATA 2010'],
'faraday':['Faraday constant = N_A e','F',96485.3365,2.1e-3,2.2e-8,'96 485.3365(21) C/mol','CODATA 2010'],
'bohrradius':['Bohr radius = 4 pi eps_0 hbar^2/(m_e e^2)','a_0',5.2917721092e-11,1.7e-20,3.2e-10,'0.529 177 210 92(17) e-10 m','CODATA 2010'],
'magflux-qu':['magnetic flux quantum = h/(2 e)','Phi_0',2.067833758e-15,4.6e-23,2.2e-8,'2.067 833 758(46) Wb','CODATA 2010'],
'conduct-qu':['conductance quantum = 2 e^2/h','G_0',7.7480917346e-5,2.5e-14,3.2e-10,'7.748 091 7346(25) e-5 S','CODATA 2010'],
'josephson':['Josephson constant = 2 e/h','K_J',4.83597870e14, 1.1e7,2.2e-8,'4.835 978 70(11) e14 Hz/V','CODATA 2010'],
'bohrmagn':['Bohr magneton = e hbar/(2 m_e)','mu_B',9.27400968e-24,2.0e-31,2.2e-8,'9.274 009 68(20) e-24 J/T','CODATA 2010'],
'nuclmagn':['nuclear magneton = e hbar/(2 m_p)','mu_N',5.05078353e-27,1.1e-34,2.2e-8,'5.050 783 53(11) e-27 J/T','CODATA 2010'],
'magnmom-e':['electron magnetic moment','mu_e',-9.28476430e-24,2.1e-31,2.2e-8,'-9.284 764 30(21) e-24 J/T','CODATA 2010'],
'magnmom-p':['proton magnetic moment','mu_p',1.410606743e-26,3.3e-33,2.4e-8,'1.410 606 743(33) e-26 J/T','CODATA 2010'],
'gfactor-e':['electron g-factor','g_e',-2.00231930436153,5.3e-13,2.6e-13,'-2.002 319 304 361 53(53)','CODATA 2010'],
'gfactor-p':['proton g-factor','g_p',5.585694713, 4.6e-8,8.2e-9,'5.585 694 713(46)','CODATA 2010'],
'alpha':['fine-structure constant = e^2/(4 pi eps_0 hbar c)','alpha',7.2973525698e-3,2.4e-12,3.2e-10,'7.297 352 5698(24) e-3','CODATA 2010'],
'alpha-1':['inverse fine-structure constant = 4 pi eps_0 hbar c/e^2','',137.035999074,4.4e-8,3.2e-10,'137.035 999 074(44)','CODATA 2010'],
'gyromagratio-p':['proton gyromagnetic ratio','gamma_p',2.675222005e8,6.3,2.4e-8,'2.675 222 005(63) e8 s^-1 T^-1','CODATA 2010'],
'magres-p':['magnetic resonance frequency proton = gamma_p/(2*pi)','',4.25774806e7,1.0,2.4e-8,'42.577 4806(10) MHz/T','CODATA 2010'],
'rydberg':['Rydberg constant = alpha^2 m_e c/(2 h)','R_infty',10973731.568539,5.5e-5,5.0e-12,'10 973 731.568 539(55) m^-1','CODATA 2010'],
'stefan-boltzm':['Stefan-Boltzmann constant = pi^2 k^4/(60 hbar^3 c^2)','sigma',5.670373e-8,2.1e-13,3.6e-6,'5.670 373(21) e-8 W m^-2 K^-4','CODATA 2010']}
# many common values are also available as global constants:
global alpha,a_0,c,e,eps_0,F,G,g_e,g_p,gamma_p,h,hbar,k_B
global m_d,m_e,m_n,m_p,mu_B,mu_e,mu_N,mu_p,mu_0,N_A,R,sigma,u
alpha = all['alpha'][2]
a_0 = all['bohrradius'][2]
c = cloc
e = all['charge-e'][2]
eps_0 = all['elec-const'][2]
F = all['faraday'][2]
G = all['gravit'][2]
g_e = all['gfactor-e'][2]
g_p = all['gfactor-p'][2]
gamma_p = all['gyromagratio-p'][2]
h = all['planck'][2]
hbar = all['dirac'][2]
k_B = all['boltzmann'][2]
m_d = all['mass-d'][2]
m_e = all['mass-e'][2]
m_n = all['mass-n'][2]
m_p = all['mass-p'][2]
mu_B = all['bohrmagn'][2]
mu_e = all['magnmom-e'][2]
mu_N = all['nuclmagn'][2]
mu_p = all['magnmom-p'][2]
mu_0 = all['magn-const'][2]
N_A = all['avogadro'][2]
R = all['gas'][2]
sigma = all['stefan-boltzm'][2]
u = all['amu'][2]
def help():
print('Available functions:')
print('[note: key must be a string, within quotes!]' )
print(' value(key) returns value (float)')
print(' sd(key) returns standard deviation (float)')
print(' relsd(key) returns relative standard deviation (float)')
print(' descr(key) prints description with units\n')
print('Available global variables:')
print(' alpha, a_0, c, e, eps_0, F, G, g_e, g_p, gamma_p, h, hbar, k_B')
print(' m_d, m_e, m_n, m_p, mu_B, mu_e, mu_N, mu_p, mu_0, N_A, R, sigma, u\n')
allkeys=sorted(all.keys())
print('Available keys:')
print(allkeys)
def value(key):
return all[key][2]
def sd(key):
return all[key][3]
def relsd(key):
return all[key][4]
def descr(key):
print('Description of ',key,':')
print(' Name: ',all[key][0])
print(' Symbol (if avail.): ',all[key][1])
print(' Value: ',all[key][2])
print(' Standard deviation: ',all[key][3])
print(' Relative stdev: ',all[key][4])
print(' value(sd) unit: ',all[key][5])
print(' Source: ',all[key][6],'\n')
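# Illustrative session (not part of the module; numbers follow from the CODATA
# 2010 values stored above):
#   >>> import physcon as pc
#   >>> pc.value('boltzmann')
#   1.3806488e-23
#   >>> pc.descr('planck')        # prints name, symbol, value, sd and source
#   >>> pc.h / (pc.m_e * pc.c)    # electron Compton wavelength, ~2.43e-12 m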
|
trigfa/notebooks
|
physcon.py
|
Python
|
mit
| 7,418
|
[
"Avogadro",
"DIRAC"
] |
e0396dac9ba317a20d8608ac492e17aa82406def8f2670a6a3d98e53846ac7d4
|
r"""
File I/O (:mod:`skbio.io`)
==========================
.. currentmodule:: skbio.io
This package provides I/O functionality for skbio.
Supported file formats
----------------------
For details on what objects are supported by each format,
see the associated documentation.
.. currentmodule:: skbio.io.format
.. autosummary::
:toctree: generated/
blast6
blast7
clustal
embl
fasta
fastq
genbank
gff3
lsmat
newick
ordination
phylip
qseq
stockholm
.. currentmodule:: skbio.io.registry
User functions
--------------
.. autosummary::
:toctree: generated/
write
read
sniff
.. currentmodule:: skbio.io
User exceptions and warnings
----------------------------
.. autosummary::
:toctree: generated/
FormatIdentificationWarning
ArgumentOverrideWarning
UnrecognizedFormatError
IOSourceError
FileFormatError
BLAST7FormatError
ClustalFormatError
EMBLFormatError
FASTAFormatError
FASTQFormatError
GenBankFormatError
GFF3FormatError
LSMatFormatError
NewickFormatError
OrdinationFormatError
PhylipFormatError
QSeqFormatError
QUALFormatError
StockholmFormatError
Subpackages
-----------
.. autosummary::
:toctree: generated/
registry
util
For developer documentation on extending I/O, see :mod:`skbio.io.registry`.
Introduction to I/O
-------------------
Reading and writing files (I/O) can be a complicated task:
* A file format can sometimes be read into more than one in-memory
representation (i.e., object). For example, a FASTA file can be read into an
:mod:`skbio.alignment.TabularMSA` or :mod:`skbio.sequence.DNA` depending on
what operations you'd like to perform on your data.
* A single object might be writeable to more than one file format. For example,
an :mod:`skbio.alignment.TabularMSA` object could be written to FASTA, FASTQ,
CLUSTAL, or PHYLIP formats, just to name a few.
* You might not know the exact file format of your file, but you want to read
it into an appropriate object.
* You might want to read multiple files into a single object, or write an
object to multiple files.
* Instead of reading a file into an object, you might want to stream the file
using a generator (e.g., if the file cannot be fully loaded into memory).
To address these issues (and others), scikit-bio provides a simple, powerful
interface for dealing with I/O. We accomplish this by using a single I/O
registry.
What kinds of files scikit-bio can use
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To see a complete list of file-like inputs that can be used for reading,
writing, and sniffing, see the documentation for :func:`skbio.io.util.open`.
Reading files into scikit-bio
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
There are two ways to read files. The first way is to use the
procedural interface:
.. code-block:: python
my_obj = skbio.io.read(file, format='someformat', into=SomeSkbioClass)
The second is to use the object-oriented (OO) interface which is automatically
constructed from the procedural interface:
.. code-block:: python
my_obj = SomeSkbioClass.read(file, format='someformat')
For example, to read a `newick` file using both interfaces you would type:
>>> from skbio import read
>>> from skbio import TreeNode
>>> from io import StringIO
>>> open_filehandle = StringIO('(a, b);')
>>> tree = read(open_filehandle, format='newick', into=TreeNode)
>>> tree
<TreeNode, name: unnamed, internal node count: 0, tips count: 2>
For the OO interface:
>>> open_filehandle = StringIO('(a, b);')
>>> tree = TreeNode.read(open_filehandle, format='newick')
>>> tree
<TreeNode, name: unnamed, internal node count: 0, tips count: 2>
In the case of :func:`skbio.io.registry.read` if `into` is not provided, then a
generator will be returned. What the generator yields will depend on what
format is being read.
When `into` is provided, format may be omitted and the registry will use its
knowledge of the available formats for the requested class to infer the correct
format. This format inference is also available in the OO interface, meaning
that `format` may be omitted there as well.
As an example:
>>> open_filehandle = StringIO('(a, b);')
>>> tree = TreeNode.read(open_filehandle)
>>> tree
<TreeNode, name: unnamed, internal node count: 0, tips count: 2>
We call format inference `sniffing`, much like the :class:`csv.Sniffer`
class of Python's standard library. The goal of a `sniffer` is twofold: to
identify if a file is a specific format, and if it is, to provide `**kwargs`
which can be used to better parse the file.
.. note:: There is a built-in `sniffer` which results in a useful error message
if an empty file is provided as input and the format was omitted.
Writing files from scikit-bio
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Just as when reading files, there are two ways to write files.
Procedural Interface:
.. code-block:: python
skbio.io.write(my_obj, format='someformat', into=file)
OO Interface:
.. code-block:: python
my_obj.write(file, format='someformat')
In the procedural interface, `format` is required. Without it, scikit-bio does
not know how you want to serialize an object. OO interfaces define a default
`format`, so it may not be necessary to include it.
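For example, to serialize the ``TreeNode`` from the reading examples back to
newick with both interfaces (a minimal sketch; ``'tree.nwk'`` is a hypothetical
output path, and newick is assumed to be the default format for ``TreeNode``):
.. code-block:: python
    skbio.io.write(tree, format='newick', into='tree.nwk')
    tree.write('tree.nwk')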
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from importlib import import_module
from ._warning import FormatIdentificationWarning, ArgumentOverrideWarning
from ._exception import (UnrecognizedFormatError, FileFormatError,
BLAST7FormatError, ClustalFormatError,
FASTAFormatError, GenBankFormatError, IOSourceError,
FASTQFormatError, LSMatFormatError, NewickFormatError,
OrdinationFormatError, PhylipFormatError,
QSeqFormatError, QUALFormatError,
StockholmFormatError, GFF3FormatError,
EMBLFormatError)
from .registry import write, read, sniff, create_format, io_registry
from .util import open
__all__ = ['write', 'read', 'sniff', 'open', 'io_registry', 'create_format',
'FormatIdentificationWarning', 'ArgumentOverrideWarning',
'UnrecognizedFormatError', 'IOSourceError',
'FileFormatError',
'BLAST7FormatError',
'ClustalFormatError',
'EMBLFormatError',
'FASTAFormatError',
'FASTQFormatError',
'GenBankFormatError',
'GFF3FormatError',
'LSMatFormatError',
'NewickFormatError',
'OrdinationFormatError',
'PhylipFormatError',
'QSeqFormatError',
'QUALFormatError',
'StockholmFormatError']
# Necessary to import each file format module to have them added to the I/O
# registry. We use import_module instead of a typical import to avoid flake8
# unused import errors.
import_module('skbio.io.format.blast6')
import_module('skbio.io.format.blast7')
import_module('skbio.io.format.clustal')
import_module('skbio.io.format.embl')
import_module('skbio.io.format.fasta')
import_module('skbio.io.format.fastq')
import_module('skbio.io.format.lsmat')
import_module('skbio.io.format.newick')
import_module('skbio.io.format.ordination')
import_module('skbio.io.format.phylip')
import_module('skbio.io.format.qseq')
import_module('skbio.io.format.genbank')
import_module('skbio.io.format.gff3')
import_module('skbio.io.format.stockholm')
# This is meant to be a handy indicator to the user that they have done
# something wrong.
import_module('skbio.io.format.emptyfile')
# Now that all of our I/O has loaded, we can add the object oriented methods
# (read and write) to each class which has registered I/O operations.
io_registry.monkey_patch()
|
gregcaporaso/scikit-bio
|
skbio/io/__init__.py
|
Python
|
bsd-3-clause
| 8,073
|
[
"scikit-bio"
] |
e42662bf8d04fab7bc1471a2247dc7afa0c88d5573b90ae349b4c64e82a68470
|
## Automatically adapted for numpy.oldnumeric Jun 27, 2008 by -c
# $Id$
#
# Copyright (C) 2000-2008 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" A module for molecules and stuff
see Chem/index.html in the doc tree for documentation
"""
from rdkit import rdBase
from rdkit import RDConfig
from rdkit import DataStructs
from rdkit.Geometry import rdGeometry
import PeriodicTable as pyPeriodicTable
import rdchem
_HasSubstructMatchStr=rdchem._HasSubstructMatchStr
from rdchem import *
from rdmolfiles import *
from rdmolops import *
from inchi import *
def QuickSmartsMatch(smi,sma,unique=True,display=False):
m = MolFromSmiles(smi)
p = MolFromSmarts(sma)
res = m.GetSubstructMatches(p,unique)
if display:
pass
return res
def CanonSmiles(smi,useChiral=1):
m = MolFromSmiles(smi)
return MolToSmiles(m,useChiral)
def SupplierFromFilename(fileN,delim='',**kwargs):
ext = fileN.split('.')[-1].lower()
if ext=='sdf':
suppl = SDMolSupplier(fileN,**kwargs)
elif ext=='csv':
if not delim:
delim = ','
suppl = SmilesMolSupplier(fileN,delimiter=delim,**kwargs)
elif ext=='txt':
if not delim:
delim='\t'
suppl = SmilesMolSupplier(fileN,delimiter=delim,**kwargs)
elif ext=='tdt':
suppl = TDTMolSupplier(fileN,delimiter=delim,**kwargs)
else:
raise ValueError,"unrecognized extension: %s"%ext
return suppl
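# Example (a hedged sketch; 'molecules.sdf' is a hypothetical file name):
#   suppl = SupplierFromFilename('molecules.sdf')
#   mols = [m for m in suppl if m is not None] # skip records that failed to parse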
def FindMolChiralCenters(mol,force=True,includeUnassigned=False):
"""
>>> from rdkit import Chem
>>> mol = Chem.MolFromSmiles('[C@H](Cl)(F)Br')
>>> FindMolChiralCenters(mol)
[(0, 'R')]
>>> mol = Chem.MolFromSmiles('[C@@H](Cl)(F)Br')
>>> FindMolChiralCenters(mol)
[(0, 'S')]
>>> FindMolChiralCenters(Chem.MolFromSmiles('CCC'))
[]
By default unassigned stereo centers are not reported:
>>> mol = Chem.MolFromSmiles('C[C@H](F)C(F)(Cl)Br')
>>> FindMolChiralCenters(mol)
[(1, 'S')]
but this can be changed:
>>> FindMolChiralCenters(mol,includeUnassigned=True)
[(1, 'S'), (3, '?')]
The handling of dependent stereochemistry is not correct:
>>> Chem.FindMolChiralCenters(Chem.MolFromSmiles('C1CC(C)C(C)C(C)C1'),includeUnassigned=True)
[(2, '?'), (6, '?')]
>>> Chem.FindMolChiralCenters(Chem.MolFromSmiles('C1C[C@H](C)C(C)[C@H](C)C1'),includeUnassigned=True)
[(2, 'S'), (6, 'R')]
"""
AssignStereochemistry(mol,force=force, flagPossibleStereoCenters=includeUnassigned)
centers = []
for atom in mol.GetAtoms():
if atom.HasProp('_CIPCode'):
centers.append((atom.GetIdx(),atom.GetProp('_CIPCode')))
elif includeUnassigned and atom.HasProp('_ChiralityPossible'):
centers.append((atom.GetIdx(),'?'))
return centers
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest,sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed,tried = _test()
sys.exit(failed)
|
rdkit/rdkit-orig
|
rdkit/Chem/__init__.py
|
Python
|
bsd-3-clause
| 3,165
|
[
"RDKit"
] |
cff4fa9d286b1ccec015d9fc4c3825c37358caceaed3b0aa29a07f8092a9a626
|
""" SiteStatus helper
Module that acts as a helper for knowing the status of a site.
It takes care of switching between the CS and the RSS.
The status is kept in the RSSCache object, which is a small wrapper on top of DictCache
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = '$Id$'
import six
import errno
import math
from time import sleep
from datetime import datetime, timedelta
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.DIRACSingleton import DIRACSingleton
from DIRAC.Core.Utilities import DErrno
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.WorkloadManagementSystem.Client.WMSAdministratorClient import WMSAdministratorClient
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.ResourceStatusSystem.Utilities.RSSCacheNoThread import RSSCache
from DIRAC.ResourceStatusSystem.Utilities.RssConfiguration import RssConfiguration
@six.add_metaclass(DIRACSingleton)
class SiteStatus(object):
"""
RSS helper to interact with the 'Site' family on the DB. It provides the most
demanded functions and a cache to avoid hitting the server too often.
It provides four methods to interact with the site statuses:
* getSiteStatuses
* isUsableSite
* getUsableSites
* getSites
"""
def __init__(self):
"""
Constructor, initializes the rssClient.
"""
self.log = gLogger.getSubLogger(self.__class__.__name__)
self.rssConfig = RssConfiguration()
self.__opHelper = Operations()
self.rssFlag = ResourceStatus().rssFlag
self.rsClient = ResourceStatusClient()
cacheLifeTime = int(self.rssConfig.getConfigCache())
# RSSCache only affects the calls directed to RSS, if using the CS it is not used.
self.rssCache = RSSCache(cacheLifeTime, self.__updateRssCache)
def __updateRssCache(self):
""" Method used to update the rssCache.
It will try 5 times to contact the RSS before giving up
"""
meta = {'columns': ['Name', 'Status', 'VO']}
for ti in range(5):
rawCache = self.rsClient.selectStatusElement('Site', 'Status', meta=meta)
if rawCache['OK']:
break
self.log.warn("Can't get resource's status", rawCache['Message'] + "; trial %d" % ti)
sleep(math.pow(ti, 2))
self.rsClient = ResourceStatusClient()
if not rawCache['OK']:
return rawCache
return S_OK(getCacheDictFromRawData(rawCache['Value']))
def getSiteStatuses(self, siteNames=None):
"""
Method that queries the database for status of the sites in a given list.
A single string site name may also be provides as "siteNames"
If the input is None, it is interpreted as * ( all ).
If match is positive, the output looks like::
{
'test1.test1.org': 'Active',
'test2.test2.org': 'Banned',
}
Examples::
>>> siteStatus.getSiteStatuses( ['test1.test1.uk', 'test2.test2.net', 'test3.test3.org'] )
S_OK( { 'test1.test1.org': 'Active', 'test2.test2.net': 'Banned', 'test3.test3.org': 'Active' } )
>>> siteStatus.getSiteStatuses( 'NotExists')
S_ERROR( ... ))
>>> siteStatus.getSiteStatuses( None )
S_OK( { 'test1.test1.org': 'Active',
'test2.test2.net': 'Banned', },
...
}
)
:param siteNames: name(s) of the sites to be matched
:type siteNames: list, str
:return: S_OK() || S_ERROR()
"""
if self.rssFlag:
return self.__getRSSSiteStatus(siteNames)
else:
siteStatusDict = {}
wmsAdmin = WMSAdministratorClient()
if siteNames:
if isinstance(siteNames, six.string_types):
siteNames = [siteNames]
for siteName in siteNames:
result = wmsAdmin.getSiteMaskStatus(siteName)
if not result['OK']:
return result
else:
siteStatusDict[siteName] = result['Value']
else:
result = wmsAdmin.getSiteMaskStatus()
if not result['OK']:
return result
else:
siteStatusDict = result['Value']
return S_OK(siteStatusDict)
def __getRSSSiteStatus(self, siteName=None):
""" Gets from the cache or the RSS the Sites status. The cache is a
copy of the DB table. If an item is not in the cache, it is most likely not
in the DB either.
There is one exception: an item just added to the CS, e.g. a new element.
During the period between its addition to the DB and the propagation of the
change to the cache the result will be inconsistent, but not dangerous. Just
wait <cacheLifeTime> minutes.
:param siteName: name of the site
:type siteName: str
:return: dict
"""
cacheMatch = self.rssCache.match(siteName, '', '', 'all') # sites have VO="all".
self.log.debug('__getRSSSiteStatus')
self.log.debug(cacheMatch)
return cacheMatch
def getUsableSites(self, siteNames=None):
"""
Returns, as a list, all sites that are usable, i.e. whose status is either
Active or Degraded.
examples
>>> siteStatus.getUsableSites( ['test1.test1.uk', 'test2.test2.net', 'test3.test3.org'] )
S_OK( ['test1.test1.uk', 'test3.test3.org'] )
>>> siteStatus.getUsableSites( None )
S_OK( ['test1.test1.uk', 'test3.test3.org', 'test4.test4.org', 'test5.test5.org', ...] )
>>> siteStatus.getUsableSites( 'NotExists' )
S_ERROR( ... )
:Parameters:
**siteNames** - `List` or `str`
name(s) of the sites to be matched
:return: S_OK() || S_ERROR()
"""
siteStatusDictRes = self.getSiteStatuses(siteNames)
if not siteStatusDictRes['OK']:
return siteStatusDictRes
siteStatusList = [x[0] for x in siteStatusDictRes['Value'].items() if x[1] in ['Active', 'Degraded']]
return S_OK(siteStatusList)
def getSites(self, siteState='Active'):
"""
By default, it gets the currently active site list
examples
>>> siteStatus.getSites()
S_OK( ['test1.test1.uk', 'test3.test3.org'] )
>>> siteStatus.getSites( 'Active' )
S_OK( ['test1.test1.uk', 'test3.test3.org'] )
>>> siteStatus.getSites( 'Banned' )
S_OK( ['test0.test0.uk', ... ] )
>>> siteStatus.getSites( 'All' )
S_OK( ['test1.test1.uk', 'test3.test3.org', 'test4.test4.org', 'test5.test5.org'...] )
>>> siteStatus.getSites( None )
S_ERROR( ... )
:Parameters:
**siteState** - `String`
state of the sites to be matched
:return: S_OK() || S_ERROR()
"""
if not siteState:
return S_ERROR(DErrno.ERESUNK, 'siteState parameter is empty')
siteStatusDictRes = self.getSiteStatuses()
if not siteStatusDictRes['OK']:
return siteStatusDictRes
if siteState.capitalize() == 'All':
# if no siteState is set return everything
siteList = list(siteStatusDictRes['Value'])
else:
# fix case sensitive string
siteState = siteState.capitalize()
allowedStateList = ['Active', 'Banned', 'Degraded', 'Probing', 'Error', 'Unknown']
if siteState not in allowedStateList:
return S_ERROR(errno.EINVAL, 'Not a valid status, parameter rejected')
siteList = [x[0] for x in siteStatusDictRes['Value'].items() if x[1] == siteState]
return S_OK(siteList)
def setSiteStatus(self, site, status, comment='No comment'):
"""
Set the status of a site in the 'SiteStatus' table of RSS
examples
>>> siteStatus.setSiteStatus( 'site1.test.test', 'Banned' )
S_OK()
>>> siteStatus.setSiteStatus( None, 'Banned' )
S_ERROR( ... )
:Parameters:
**site** - `String`
the site whose status is being set
**status** - `String`
new status ( one of Active, Banned, Degraded, Probing, Error, Unknown )
**comment** - `String`
reason for the status change
:return: S_OK() || S_ERROR()
"""
if not status:
return S_ERROR(DErrno.ERESUNK, 'status parameter is empty')
# fix case sensitive string
status = status.capitalize()
allowedStateList = ['Active', 'Banned', 'Degraded', 'Probing', 'Error', 'Unknown']
if status not in allowedStateList:
return S_ERROR(errno.EINVAL, 'Not a valid status, parameter rejected')
if self.rssFlag:
result = getProxyInfo()
if result['OK']:
tokenOwner = result['Value']['username']
else:
return S_ERROR("Unable to get user proxy info %s " % result['Message'])
tokenExpiration = datetime.utcnow() + timedelta(days=1)
self.rssCache.acquireLock()
try:
result = self.rsClient.modifyStatusElement('Site', 'Status', status=status, name=site,
tokenExpiration=tokenExpiration, reason=comment,
tokenOwner=tokenOwner)
if result['OK']:
self.rssCache.refreshCache()
else:
_msg = 'Error updating status of site %s to %s' % (site, status)
gLogger.warn('RSS: %s' % _msg)
# Release lock, no matter what.
finally:
self.rssCache.releaseLock()
else:
if status in ['Active', 'Degraded']:
result = WMSAdministratorClient().allowSite()
else:
result = WMSAdministratorClient().banSite()
return result
def getCacheDictFromRawData(rawList):
"""
Formats the raw data list, which we know contains two-element tuples
( element1, element2 ), into a dictionary of the form
{ element1 : element2 }.
The resulting dictionary will be the new Cache.
It happens that element1 is elementName and element2 is status.
:Parameters:
**rawList** - `list`
list of two-element tuples [( element1, element2 ),... ]
:return: dict of the form { ( elementName ) : status, ... }
"""
res = {}
for entry in rawList:
res.update({(entry[0]): entry[1]})
return res
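# Illustrative usage (site names are hypothetical):
#   siteStatus = SiteStatus()
#   res = siteStatus.getUsableSites(['LCG.Example1.org', 'LCG.Example2.org'])
#   if res['OK']:
#     usable = res['Value'] # the subset whose status is Active or Degraded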
|
yujikato/DIRAC
|
src/DIRAC/ResourceStatusSystem/Client/SiteStatus.py
|
Python
|
gpl-3.0
| 10,057
|
[
"DIRAC"
] |
2151be4728a1523d0bd19a7af800c6e2d00596071441ac8ac1577d14c586b701
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os, csv
COV = None
if os.environ.get('FLASK_COVERAGE'):
import coverage
COV = coverage.coverage(branch=True, include='app/*')
COV.start()
if os.path.exists('.env'):
print('Importing environment from .env...')
for line in open('.env'):
var = line.strip().split('=')
if len(var) == 2:
os.environ[var[0]] = var[1]
from app import create_app, db
from app.models import User, Role, Permission, \
IUCNStatus, OrganismType, GrowthFormRaunkiaer, ReproductiveRepetition, \
DicotMonoc, AngioGymno, SpandExGrowthType, SourceType, Database, Purpose, MissingData, ContentEmail, Ecoregion, Continent, InvasiveStatusStudy, InvasiveStatusElsewhere, StageTypeClass, \
TransitionType, MatrixComposition, StartSeason, StudiedSex, Captivity, Species, Taxonomy, Trait, \
Publication, AuthorContact, AdditionalSource, Population, Stage, StageType, Treatment, \
MatrixStage, MatrixValue, Matrix, Interval, Fixed, Small, CensusTiming, Status, PurposeEndangered, PurposeWeed, Version, Institute, EndSeason, ChangeLogger, PublicationsProtocol, DigitizationProtocol, Protocol, CommonTerm
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
from app.matrix_functions import as_array, calc_lambda, calc_surv_issue, is_matrix_irreducible, is_matrix_primitive, is_matrix_ergodic
from flask import Flask, session
from flask.ext.alchemydumps import AlchemyDumps, AlchemyDumpsCommand
from flask.ext.sqlalchemy import SQLAlchemy
import random
def gen_hex_code():
r = lambda: random.randint(0,255)
return('#%02X%02X%02X' % (r(),r(),r()))
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role,
Permission=Permission, IUCNStatus=IUCNStatus, Species=Species, \
Taxonomy=Taxonomy, OrganismType=OrganismType, GrowthFormRaunkiaer=GrowthFormRaunkiaer, \
ReproductiveRepetition=ReproductiveRepetition, DicotMonoc=DicotMonoc, AngioGymno=AngioGymno, SpandExGrowthType=SpandExGrowthType, Trait=Trait, \
Publication=Publication, SourceType=SourceType, Database=Database, Purpose=Purpose, MissingData=MissingData, \
AuthorContact=AuthorContact, ContentEmail=ContentEmail, Population=Population, Ecoregion=Ecoregion, Continent=Continent, \
StageType=StageType, StageTypeClass=StageTypeClass, TransitionType=TransitionType, MatrixValue=MatrixValue, \
MatrixComposition=MatrixComposition, StartSeason=StartSeason, StudiedSex=StudiedSex, Captivity=Captivity, MatrixStage=MatrixStage,\
Matrix=Matrix, Interval=Interval, Fixed=Fixed, Small=Small, CensusTiming=CensusTiming, Status=Status, InvasiveStatusStudy=InvasiveStatusStudy, InvasiveStatusElsewhere=InvasiveStatusElsewhere, \
PurposeEndangered=PurposeEndangered, PurposeWeed=PurposeWeed, Version=Version, Institute=Institute, EndSeason=EndSeason, ChangeLogger = ChangeLogger, PublicationsProtocol = PublicationsProtocol, \
DigitizationProtocol = DigitizationProtocol, Protocol=Protocol, CommonTerm=CommonTerm)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
manager.add_command('alchemydumps', AlchemyDumpsCommand)
@manager.command
def test(coverage=False):
"""Run the unit tests."""
if coverage and not os.environ.get('FLASK_COVERAGE'):
import sys
os.environ['FLASK_COVERAGE'] = '1'
os.execvp(sys.executable, [sys.executable] + sys.argv)
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if COV:
COV.stop()
COV.save()
print('Coverage Summary:')
COV.report()
basedir = os.path.abspath(os.path.dirname(__file__))
covdir = os.path.join(basedir, 'tmp/coverage')
COV.html_report(directory=covdir)
print('HTML version: file://%s/index.html' % covdir)
COV.erase()
@manager.command
def profile(length=25, profile_dir=None):
"""Start the application under the code profiler."""
from werkzeug.contrib.profiler import ProfilerMiddleware
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length],
profile_dir=profile_dir)
app.run()
def UnicodeDictReader(utf8_data, **kwargs):
csv_reader = csv.DictReader(utf8_data, **kwargs)
for row in csv_reader:
yield {key: unicode(value, 'latin-1') for key, value in row.iteritems()}
@manager.command
def delete_table_data():
response = raw_input("Are you sure you want to delete all data? (y/n): ")
if response == "y":
Version.query.delete()
Taxonomy.query.delete()
Matrix.query.delete()
Population.query.delete()
Publication.query.delete()
Trait.query.delete()
Species.query.delete()
Version.query.delete()
Protocol.query.delete()
db.session.commit()
print "All data has been removed"
elif response == "n":
print "Table data not deleted"
pass
else:
print("Valid response required (y/n)")
return
# This can be padded out for future stuff...
def coerce_boolean(string):
true = ['Yes', 'Divided','TRUE','T']
false = ['No', 'Undivided','FALSE','F','Indivisible']
if string in true:
return True
elif string in false:
return False
def return_con(obj):
import re, string
joined =''.join([value for key, value in obj.items()])
lower = joined.lower()
stripped = lower.replace(' ', '')
alphanumeric = re.sub('[\W_]+', '', stripped)
return alphanumeric
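# Worked example (illustrative): return_con({'authors': 'Smith, J. & Doe'})
# lower-cases the joined values, strips spaces and non-alphanumeric characters,
# and returns 'smithjdoe'.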
def create_id_string(dict):
new_dict = {
"species_accepted" : dict["species_accepted"], #
"journal" : dict['journal'], #
"year_pub" : dict["year"], #
"authors" : dict["authors"][:15], #first15 (if > 15, add character to end >)
"name" : dict["name"], # what sort of name is this?
"matrix_composite" : dict['matrix_composition_id'], #
"matrix_treatment" : dict['treatment_id'], #
"matrix_start_year" : dict['matrix_start_year'], #
"observation" : dict['observations'], #
"matrix_a_string" : dict['matrix_a_string'] #
}
return return_con(new_dict)
def similar(a, b):
from difflib import SequenceMatcher
return SequenceMatcher(None, a, b).ratio()
def generate_uid(species, publication, population, matrix):
import re
species_accepted = species.species_accepted
journal = publication.journal_name if publication else None
year_pub = publication.year if publication else None
try:
authors = publication.authors[:15].encode('utf-8')
except:
authors = ''
try:
pop_name = population.population_name.encode('utf-8')[:15] if population else None
except:
pop_name = ''
try:
composite = matrix.matrix_composition.comp_name
except AttributeError:
composite = ''
try:
start_year = matrix.matrix_start_year
except TypeError:
start_year = ''
import time
timestamp = time.time()
uid_concat = '{}{}{}{}{}{}{}{}'.format(species_accepted, journal, year_pub, authors, pop_name, composite, start_year, timestamp)
uid_lower = uid_concat.lower()
uid = re.sub('[\W_]+', '', uid_lower)
return uid
def data_clean(data):
incomplete = True if 'NDY' in data.values() else False
kwargs = {key: val for key, val in data.items() if val != 'NDY'}
amber = Status.query.filter_by(status_name="Amber").first()
green = Status.query.filter_by(status_name="Green").first()
#kwargs['version_ok'] = 0 if incomplete else 1
#kwargs['version_original'] = 1
#kwargs['version_latest'] = 1
return {'kwargs' : kwargs, 'status' : amber if incomplete else green}
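# Worked example (illustrative; 'NDY' is read here as a not-yet-digitised
# placeholder, which is an assumption about the project's shorthand):
#   data_clean({'authors': 'Smith', 'year': 'NDY'})
#   -> {'kwargs': {'authors': 'Smith'}, 'status': <Amber>} # flagged incomplete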
def version_data(cleaned):
version = {'checked' : False,
'checked_count' : 0,
'statuses' : cleaned['status']#,
#'version_number' : 1,
#'user' : User.query.filter_by(username='admin').first(),
#'database' : Database.query.filter_by(database_name='COMPADRE 4').first()
}
return version
@manager.command
def submit_new(data):
import datetime
version = {'checked' : True,
'checked_count' : 1,
'statuses' : Status.query.filter_by(status_name="Green").first()}
if data["population_database_id"] == "XXX" or data["population_database_id"] == "X.X.X":
version = {'checked' : False,
'checked_count' : 0,
'statuses' : Status.query.filter_by(status_name="Pending").first()}
# When checking for null data later, these need to be excluded, as they will always have a value
ignore_keys = ['version_ok', 'version_latest', 'version_original']
''' DigitizationProtocol '''
# digitization_protocol = DigitizationProtocol.query.filter_by(field_name=data["digitization_protocol"]).first()
# if digitization_protocol == None:
# ac_dict = {'protocol_id' : protocol.id,
# 'field_name' : data['field_name'],
# 'name_in_csv' : data["name_in_csv"],
# 'database_model' : data["database_model"],
# 'field_description' : data["field_description"],
# 'field_short_description' : data["field_short_description"]
# }
# ac_cleaned = data_clean(ac_dict)
# digitization_protocol = Protocol(**ac_cleaned["kwargs"])
# db.session.add(digitization_protocol)
# db.session.commit()
''' Publication '''
publications_protocol = PublicationsProtocol.query.filter_by(protocol_number=data["publications_protocol_id"]).first()
if data["publication_DOI_ISBN"] == None:
publication = Publication.query.filter_by(authors=data["publication_authors"]).filter_by(year=data["publication_year"]).filter_by(journal_name=data["publication_journal_name"]).filter_by(additional_source_string=data["publication_additional_source_string"]).filter_by(study_notes= data["publication_study_notes"]).first()
else:
publication = Publication.query.filter_by(DOI_ISBN=data["publication_DOI_ISBN"]).first()
if publication == None:
purposes = {"Comparative Demography" : data["publication_purpose_comparative_demography"],
"Spatial Demography" : data["publication_purpose_spatial_demography"],
"Abiotic Impacts" : data["publication_purpose_abiotic"],
"PVA" : data["publication_purpose_pva"],
"Species Dynamics Description" : data["publication_purpose_species_dynamics_description"],
"Interspecific Interactions" : data["publication_purpose_interspecific_interactions"],
"Management Evaluation" : data["publication_purpose_management_evaluation"],
"Methodological Advancement" : data["publication_purpose_methodological_advancement"]
}
queryset = [Purpose.query.filter(Purpose.purpose_name == key).first() for key, val in purposes.items() if val == '1']
if data['publication_missing_data'] != 'NDY' and data['publication_missing_data']:
missing_data_unicode = data['publication_missing_data'].replace(" ", "").split(';')
missing_data = [MissingData.query.filter_by(missing_code=key).first() for key in missing_data_unicode if MissingData.query.filter_by(missing_code=key).first()]
else:
missing_data = 'NDY'
pub_dict = {'authors': data["publication_authors"],
'year' : data["publication_year"],
'publications_protocol' : publications_protocol,
'DOI_ISBN' : data["publication_DOI_ISBN"],
'additional_source_string' : data["publication_additional_source_string"],
'journal_name' : data["publication_journal_name"],
'date_digitised' : datetime.datetime.strptime(data['publication_date_digitization'], "%d/%m/%Y").strftime("%Y-%m-%d") if data['publication_date_digitization'] else None,
'purposes' : queryset,
'study_notes' : data["publication_study_notes"]
}
pub_cleaned = data_clean(pub_dict)
# if not all(value == None for key, value in pub_cleaned["kwargs"].items() if key not in ignore_keys) and study_present:
publication = Publication(**pub_cleaned["kwargs"])
db.session.add(publication)
db.session.commit()
publication.missing_data = missing_data if type(missing_data) == list else []
db.session.add(publication)
db.session.commit()
''' Publication Version '''
#version = version_data(pub_cleaned)
publication_version = Version(**version)
publication_version.publication = publication
publication.colour = gen_hex_code()
possible_user = User.query.filter_by(name = data["publication_student"]).first()
na_user = User.query.filter_by(name = "N/A").first()
if possible_user == None:
possible_user = na_user
publication_version.entered_by_id = possible_user.id if possible_user else None
publication_version.checked_by_id = na_user.id if na_user else None
db.session.add(publication_version)
db.session.commit()
publication_version.original_version_id = publication_version.id
db.session.add(publication_version)
db.session.commit()
''' Author contact '''
author_contacts = AuthorContact.query.filter_by(corresponding_author = data["publication_corresponding_author"]).filter_by(corresponding_author_email = data["publication_corresponding_email"]).first()
if author_contacts == None:
ac_dict = {'publication_id' : publication.id,
'date_contacted' : datetime.datetime.strptime(data['date_author_contacted'], "%d/%m/%Y").strftime("%Y-%m-%d") if data['date_author_contacted'] else None,
'date_contacted_again' : datetime.datetime.strptime(data['date_author_contacted_again'], "%d/%m/%Y").strftime("%Y-%m-%d") if data['date_author_contacted_again'] else None,
'extra_content_email' : data["correspondence_email_content"],
'author_reply' : data["correspondence_author_reply"],
'corresponding_author' : data["publication_corresponding_author"],
'corresponding_author_email' : data["publication_corresponding_email"],
'correspondence_email_content' : data["correspondence_email_content"],
'extra_content_email' : data["extra_content_email"],
'contacting_user_id' : possible_user.id if possible_user else None
}
ac_cleaned = data_clean(ac_dict)
author_contact = AuthorContact(**ac_cleaned["kwargs"])
db.session.add(author_contact)
db.session.commit()
''' Author Contact Version '''
#version = version_data(ac_cleaned)
author_contact_version = Version(**version)
author_contact_version.author_contact = author_contact
db.session.add(author_contact_version)
db.session.commit()
author_contact_version.original_version_id = author_contact_version.id
db.session.add(author_contact_version)
db.session.commit()
''' Species '''
species = Species.query.filter_by(species_accepted=data["species_accepted"]).first()
iucn = IUCNStatus.query.filter_by(status_code=data["species_iucn_status_id"]).first()
if species == None:
species_dict = {'gbif_taxon_key': data["species_gbif_taxon_key"],
'species_iucn_taxonid': data["species_iucn_taxonid"],
'species_accepted' : data["species_accepted"],
'species_common' : data["species_common"],
'iucn_status_id' : iucn.id if iucn else None,
'image_path' : data["image_path"],
'image_path2' : data["image_path2"]}
species_cleaned = data_clean(species_dict)
species = Species(**species_cleaned["kwargs"])
db.session.add(species)
db.session.commit()
''' Species Version '''
#version = version_data(species_cleaned)
species_version = Version(**version)
species_version.species = species
db.session.add(species_version)
db.session.commit()
species_version.original_version_id = species_version.id
db.session.add(species_version)
db.session.commit()
''' Trait '''
spand_ex_growth_type = SpandExGrowthType.query.filter_by(type_name=data["trait_spand_ex_growth_type_id"]).first()
dicot_monoc = DicotMonoc.query.filter_by(dicot_monoc_name=data["trait_dicot_monoc_id"]).first()
growth_form_raunkiaer = GrowthFormRaunkiaer.query.filter_by(form_name=data["trait_growth_form_raunkiaer_id"]).first()
organism_type = OrganismType.query.filter_by(type_name=data["trait_organism_type_id"]).first()
angio_gymno = AngioGymno.query.filter_by(angio_gymno_name=data["trait_angio_gymno_id"]).first()
trait = Trait.query.filter_by(species_id=species.id).first()
if trait == None:
trait_dict = {'species_id': species.id,
'organism_type': organism_type,
'dicot_monoc': dicot_monoc,
'angio_gymno': angio_gymno,
'species_seedbank' : coerce_boolean(data["species_seedbank"]),
'species_gisd_status' : coerce_boolean(data["species_gisd_status"]),
'species_clonality' : coerce_boolean(data["species_clonality"]),
'spand_ex_growth_type_id' : spand_ex_growth_type.id if spand_ex_growth_type else None,
'growth_form_raunkiaer_id' : growth_form_raunkiaer.id if growth_form_raunkiaer else None}
trait_cleaned = data_clean(trait_dict)
trait = Trait(**trait_cleaned["kwargs"])
db.session.add(trait)
db.session.commit()
''' Trait Version '''
#version = version_data(trait_cleaned)
trait_version = Version(**version)
trait_version.trait = trait
db.session.add(trait_version)
db.session.commit()
trait_version.original_version_id = trait_version.id
db.session.add(trait_version)
db.session.commit()
''' Taxonomy '''
tax = Taxonomy.query.filter_by(species_id=species.id).first()
if tax == None:
tax_dict = {'authority' : None,
'tpl_version' : None,
'infraspecies_accepted' : None,
'species_epithet_accepted' : None,
'genus_accepted' : data["taxonomy_genus_accepted"],
'genus' : data["taxonomy_genus"],
'family' : data["taxonomy_family"],
'tax_order' : data["taxonomy_order"],
'tax_class' : data["taxonomy_class"],
'phylum' : data["taxonomy_phylum"],
'kingdom' : data["taxonomy_kingdom"],
'col_check_date' : datetime.datetime.strptime(data["taxonomy_col_check_date"], "%d/%m/%Y").strftime("%Y-%m-%d") if data['taxonomy_col_check_date'] else None,
'col_check_ok' : coerce_boolean(data["taxonomy_col_check_ok"])}
tax_cleaned = data_clean(tax_dict)
# if not all(value == None for key, value in tax_cleaned["kwargs"].items() if key not in ignore_keys):
tax = Taxonomy(**tax_cleaned["kwargs"])
db.session.add(tax)
db.session.commit()
tax.species = species
db.session.add(tax)
db.session.commit()
''' Taxonomy Version '''
#version = version_data(tax_cleaned)
taxonomy_version = Version(**version)
taxonomy_version.version_number = 1
taxonomy_version.taxonomy = tax
db.session.add(taxonomy_version)
db.session.commit()
taxonomy_version.original_version_id = taxonomy_version.id
db.session.add(taxonomy_version)
db.session.commit()
''' Study '''
# What if all none? Will they be grouped together?
# study = Study.query.filter_by(publication_id=publication.id, study_start=data["study_start"], study_end=data["study_end"]).first()
# if study == None:
# purpose_endangered = PurposeEndangered.query.filter_by(purpose_name=data["study_purpose_endangered_id"]).first() if data["study_purpose_endangered_id"] else data["study_purpose_endangered_id"]
#
# purpose_weed = PurposeWeed.query.filter_by(purpose_name="study_purpose_weed_id").first() if data["study_purpose_weed_id"] else data["study_purpose_weed_id"]
# database_source = Institute.query.filter_by(institution_name=data["study_database_source"]).first()# if data["study_purpose_weed_id"] else data["study_purpose_endangered_id"]
#
# study_dict = {'study_duration' : data["study_duration"],
# 'study_start' : data["study_start"],
# 'study_end' : data["study_end"],
# 'number_populations' : data["study_number_populations"],
# 'purpose_endangered_id' : purpose_endangered.id if purpose_endangered else None,
# 'purpose_weed_id' : purpose_weed.id if purpose_weed else None,
# 'database_source' : database_source}
#
# study_cleaned = data_clean(study_dict)
#
# # if not all(value == None for key, value in study_cleaned["kwargs"].items() if key not in ignore_keys) and population_present:
# study = Study(**study_cleaned["kwargs"])
# db.session.add(study)
# db.session.commit()
#
# study.publication_id = publication.id
# study.species_id = species.id
# db.session.add(study)
# db.session.commit()
#
#
# ''' Study Version '''
# version = version_data(study_cleaned)
# study_version = Version(**version)
# study_version.version_number = 1
# study_version.study = study
# db.session.add(study_version)
# db.session.commit()
# study_version.original_version_id = study_version.id
# db.session.add(study_version)
# db.session.commit()
''' Protocol '''
# digitization_protocol = DigitizationProtocol.query.filter_by(field_name=data["digitization_protocol_id"]).first()
# commonterm = CommonTerm.query.filter_by(common_value_name=data["commonterm_id"]).first()
# protocol = Protocol.query.filter_by(protocol_id=protocol.id).first()
# if protocol == None:
# protocol_dict = {'protocol_id' : protocol.id,
# 'digitization_protocol_id' : digitization_protocol.id if digitization_protocol else None,
# 'commonterm_id' : commonterm.id if commonterm else None}
# protocol_cleaned = data_clean(protocol_dict)
# protocol = Protocol(**protocol_cleaned["kwargs"])
# db.session.add(protocol)
# db.session.commit()
''' Population '''
''' '''
invasive_status_study = InvasiveStatusStudy.query.filter_by(status_name=data["population_invasive_status_study_id"]).first()
invasive_status_elsewhere = InvasiveStatusStudy.query.filter_by(status_name=data["population_invasive_status_elsewhere_id"]).first()
ecoregion = Ecoregion.query.filter_by(ecoregion_code=data["population_ecoregion_id"]).first()
continent = Continent.query.filter_by(continent_name=data["population_continent_id"]).first()
### Danny: trying to add the database meta-table in the correct location
database = Database.query.filter_by(database_master_version=data["population_database_id"]).first()
purpose_endangered = PurposeEndangered.query.filter_by(purpose_name=data["study_purpose_endangered_id"]).first() if data["study_purpose_endangered_id"] else data["study_purpose_endangered_id"]
purpose_weed = PurposeWeed.query.filter_by(purpose_name=data["study_purpose_weed_id"]).first() if data["study_purpose_weed_id"] else data["study_purpose_weed_id"]
database_source = Institute.query.filter_by(institution_name=data["study_database_source_id"]).first()
pop = Population.query.filter_by(population_name=data["population_name"], publication_id=publication.id, species_id=species.id).first()
if pop == None:
pop_dict = {'population_name' : data["population_name"],
'latitude' : data["population_latitude"],
'lat_ns' : data["lat_ns"],
'lat_deg' : data["lat_deg"],
'lat_min' : data["lat_min"],
'lat_sec' : data["lat_sec"],
'longitude' : data["population_longitude"],
'lon_ew' : data["lon_ew"],
'lon_deg' : data["lon_deg"],
'lon_min' : data["lon_min"],
'lon_sec' : data["lon_sec"],
'altitude' : data["population_altitude"],
#'pop_size' : data["population_pop_size"],
'country' : data["population_country"],
'invasive_status_study_id' : invasive_status_study.id if invasive_status_study else None,
'invasive_status_elsewhere_id' : invasive_status_elsewhere.id if invasive_status_elsewhere else None,
'ecoregion' : ecoregion,
'continent' : continent,
'database' : database,
'within_site_replication' : data['population_within_site_replication'],
'study_duration' : data["study_duration"],
'study_start' : data["study_start"],
'study_end' : data["study_end"],
'number_populations' : data["study_number_populations"],
'purpose_endangered_id' : purpose_endangered.id if purpose_endangered else None,
'purpose_weed_id' : purpose_weed.id if purpose_weed else None,
'database_source' : database_source
}
pop_cleaned = data_clean(pop_dict)
# if not all(value == None for key, value in pop_cleaned["kwargs"].items() if key not in ignore_keys) and matrix_present:
pop = Population(**pop_cleaned["kwargs"])
db.session.add(pop)
db.session.commit()
pop.species_author = data["species_author"]
pop.publication_id = publication.id
pop.species_id = species.id
db.session.add(pop)
db.session.commit()
''' Population Version '''
#version = version_data(pop_cleaned)
population_version = Version(**version)
population_version.version_number = 1
population_version.population = pop
db.session.add(population_version)
db.session.commit()
population_version.original_version_id = population_version.id
db.session.add(population_version)
db.session.commit()
''' Matrix '''
treatment_string = data["matrix_treatment_id"]
if treatment_string == 'NDY':
treatment = 'NDY'
elif treatment_string == None:
treatment = None
else:
treatment = Treatment.query.filter_by(treatment_name=data["matrix_treatment_id"]).first() if Treatment.query.filter_by(treatment_name=data["matrix_treatment_id"]).first() else Treatment(treatment_name=data["matrix_treatment_id"])
db.session.add(treatment)
db.session.commit()
matrix_dict = {'treatment' : treatment,
'matrix_split' : coerce_boolean(data["matrix_split"]),
'matrix_composition' : MatrixComposition.query.filter_by(comp_name=data["matrix_composition_id"]).first(),
'matrix_criteria_size' : data["matrix_criteria_size"],
'matrix_criteria_ontogeny' : coerce_boolean(data["matrix_criteria_ontogeny"]),
'matrix_criteria_age' : coerce_boolean(data["matrix_criteria_age"]),
'matrix_start_month' : data["matrix_start_month"],
'matrix_end_month' : data["matrix_end_month"],
'matrix_start_year' : data["matrix_start_year"],
'matrix_end_year' : data["matrix_end_year"],
'studied_sex' : StudiedSex.query.filter_by(sex_code=data["matrix_studied_sex_id"]).first(),
'start_season' : StartSeason.query.filter_by(season_id=data["matrix_start_season_id"]).first() if data["matrix_start_season_id"] else None,
'end_season' : EndSeason.query.filter_by(season_id=data["matrix_end_season_id"]).first() if data["matrix_end_season_id"] else None,
'matrix_fec' : coerce_boolean(data["matrix_fec"]),
'matrix_a_string' : data["matrix_a_string"],
'matrix_f_string' : data["matrix_f_string"],
'matrix_u_string' : data["matrix_u_string"],
'matrix_c_string' : data["matrix_c_string"],
'non_independence' : data["matrix_non_independence"],
'matrix_dimension' : data["matrix_dimension"],
'non_independence_author' : data["matrix_non_independence_author"],
'matrix_complete' : coerce_boolean(data["matrix_complete"]),
'class_number' : data["matrix_class_number"],
'observations' : data["matrix_observations"],
'captivities' : Captivity.query.filter_by(cap_code=data["matrix_captivity_id"]).first(),
'class_author' : data["matrix_class_author"],
'class_organized' : data["matrix_class_organized"],
'matrix_difficulty' : data["matrix_difficulty"],
'seasonal' : coerce_boolean(data["matrix_seasonal"]),
'survival_issue' : calc_surv_issue(data["matrix_u_string"]),
'periodicity' : data["matrix_periodicity"],
'matrix_irreducible' : is_matrix_irreducible(data["matrix_a_string"]),
'matrix_primitive' : is_matrix_primitive(data["matrix_a_string"]),
'matrix_ergodic' : is_matrix_ergodic(data["matrix_a_string"]),
'matrix_lambda' : calc_lambda(data["matrix_a_string"])
}
matrix_cleaned = data_clean(matrix_dict)
# if not all(value == None for key, value in matrix_cleaned["kwargs"].items() if key not in ignore_keys):
matrix = Matrix(**matrix_cleaned["kwargs"])
db.session.add(matrix)
db.session.commit()
matrix.population_id = pop.id
db.session.add(matrix)
db.session.commit()
''' matrix Version '''
#version = version_data(matrix_cleaned)
matrix_version = Version(**version)
matrix_version.version_number = 1
matrix_version.matrix = matrix
db.session.add(matrix_version)
db.session.commit()
matrix_version.original_version_id = matrix_version.id
db.session.add(matrix_version)
db.session.commit()
''' Fixed '''
fixed = Fixed.query.filter_by(matrix=matrix).first()
if fixed == None:
fixed_dict = {'matrix' : matrix,
'census_timings' : CensusTiming.query.filter_by(census_name=data["fixed_census_timing_id"]).first(),
'seed_stage_error' : coerce_boolean(data["fixed_seed_stage_error"]),
'smalls' : Small.query.filter_by(small_name=data["fixed_small_id"]).first(),
'vector_str' : data["matrix_vectors_includes_na"]
}
fixed_cleaned = data_clean(fixed_dict)
fixed = Fixed(**fixed_cleaned["kwargs"])
db.session.add(fixed)
db.session.commit()
''' fixed Version '''
#version = version_data(fixed_cleaned)
fixed_version = Version(**version)
fixed_version.version_number = 1
fixed_version.fixed = fixed
db.session.add(fixed_version)
db.session.commit()
fixed_version.original_version_id = fixed_version.id
db.session.add(fixed_version)
db.session.commit()
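# Editor's note (sketch): submit_new() above relies on matrix diagnostics such as
# is_matrix_irreducible(), is_matrix_primitive(), is_matrix_ergodic() and
# calc_lambda(), which are defined elsewhere in this module and work on the
# database's string encoding of the "A" matrix. Purely as an illustration of what
# such diagnostics typically compute -- assuming the matrix is already available
# as a square numpy array -- a hedged sketch is given below. The helper name is
# hypothetical and is not used anywhere else in this file.
def _sketch_matrix_diagnostics(a_matrix):
    """Illustrative only: dominant eigenvalue and irreducibility of a projection matrix."""
    import numpy as np
    a = np.asarray(a_matrix, dtype=float)
    n = a.shape[0]
    # lambda: modulus of the dominant eigenvalue of the projection matrix A
    dominant_lambda = max(abs(np.linalg.eigvals(a)))
    # irreducibility: (I + A)^(n-1) has no zero entry (standard reachability test
    # for a non-negative matrix)
    reach = np.linalg.matrix_power(np.eye(n) + a, n - 1)
    irreducible = bool((reach > 0).all())
    return dominant_lambda, irreducible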
def migration_loop(input_file):
all_deets = []
for i, row in enumerate(input_file):
print i
data = convert_all_headers_new(row)
submit_new(data)
return "Migration Complete"
@manager.command
def migrate_compadre():
import csv
print "Migrating COMPADRE"
compadre = UnicodeDictReader(open("app/data-migrate/compadre_migration_2017.csv", "rU"))
return migration_loop(compadre)
@manager.command
def migrate_comadre():
import csv
print "Migrating COMADRE"
comadre = UnicodeDictReader(open("app/data-migrate/comadre_migration_2017.csv", "rU"))
return migration_loop(comadre)
@manager.command
def migrate_all():
import csv
print "Preparing to migrate COMPADRE and COMADRE"
compadre = UnicodeDictReader(open("app/data-migrate/compadre_migration_2017.csv", "rU"))
comadre = UnicodeDictReader(open("app/data-migrate/comadre_migration_2017.csv", "rU"))
print "Migrating COMPADRE"
migration_loop(compadre)
print "Migrating COMADRE"
migration_loop(comadre)
return
def convert_all_headers_new(dict):
import re
new_dict = {}
new_dict["species_gisd_status"] = dict["species_gisd_status"]
new_dict["species_seedbank"] = dict["species_seedbank"]
new_dict["species_clonality"] = dict["species_clonality"]
new_dict["publication_purpose_comparative_demography"] = dict["publication_purpose_comparative_demography"]
new_dict["publication_purpose_species_dynamics_description"] = dict["publication_purpose_species_dynamics_description"]
new_dict["publication_purpose_spatial_demography"] = dict["publication_purpose_spatial_demography"]
new_dict["publication_purpose_pva"] = dict["publication_purpose_pva"]
new_dict["publication_purpose_methodological_advancement"] = dict["publication_purpose_methodological_advancement"]
new_dict["publication_purpose_management_evaluation"] = dict["publication_purpose_management_evaluation"]
new_dict["publication_purpose_interspecific_interactions"] = dict["publication_purpose_interspecific_interactions"]
new_dict["publication_purpose_abiotic"] = dict["publication_purpose_abiotic"]
new_dict["species_author"] = dict["species_author"]
new_dict["species_accepted"] = dict["species_accepted"]
new_dict["species_common"]= dict["species_common"]
new_dict["taxonomy_genus"] = dict["taxonomy_genus"]
new_dict["taxonomy_family"] = dict["taxonomy_family"]
new_dict["taxonomy_order"] = dict["taxonomy_order"]
new_dict["taxonomy_class"] = dict["taxonomy_class"]
new_dict["taxonomy_phylum"] = dict["taxonomy_phylum"]
new_dict["taxonomy_kingdom"] = dict["taxonomy_kingdom"]
new_dict["trait_organism_type_id"] = dict["trait_organism_type"]
new_dict["trait_dicot_monoc_id"] = dict["trait_dicot_monoc"]
new_dict["trait_angio_gymno_id"] = dict["trait_angio_gymno"]
new_dict["publication_authors"] = dict["publication_authors"]
new_dict["publication_journal_name"] = dict["publication_journal_name"]
new_dict["publication_year"] = dict["publication_year"]
new_dict["publication_DOI_ISBN"] = dict["publication_DOI_ISBN"]
new_dict["publication_additional_source_string"] = dict["publication_additional_source_string"]
new_dict["study_duration"] = dict["study_duration"]
new_dict["study_start"] = dict["study_start"]
new_dict["study_end"] = dict["study_end"]
new_dict["matrix_periodicity"] = dict["matrix_periodicity"]
new_dict["study_number_populations"] = dict["study_number_populations"]
new_dict["matrix_criteria_size"] = dict["matrix_criteria_size"]
new_dict["matrix_criteria_ontogeny"] = dict["matrix_criteria_ontogeny"]
new_dict["matrix_criteria_age"] = dict["matrix_criteria_age"]
new_dict["population_name"] = dict["population_name"]
new_dict["population_latitude"] = dict["population_latitude"]
new_dict["lat_ns"] = dict["lat_ns"]
new_dict["lat_deg"] = dict["lat_deg"]
new_dict["lat_min"] = dict["lat_min"]
new_dict["lat_sec"] = dict["lat_sec"]
new_dict["population_longitude"] = dict["population_longitude"]
new_dict["lon_ew"] = dict["lon_ew"]
new_dict["lon_deg"] = dict["lon_deg"]
new_dict["lon_min"] = dict["lon_min"]
new_dict["lon_sec"] = dict["lon_sec"]
new_dict["population_altitude"]= dict["population_altitude"]
new_dict["population_country"] = dict["population_country"]
new_dict["population_continent_id"] = dict["population_continent"]
new_dict["population_ecoregion_id"] = dict["population_ecoregion"]
new_dict["matrix_studied_sex_id"] = dict["matrix_studied_sex"]
new_dict["matrix_composition_id"] = dict["matrix_composition"]
new_dict["matrix_treatment_id"] = dict["matrix_treatment_type"]
new_dict["matrix_captivity_id"] = dict["matrix_captivity"]
new_dict["matrix_start_year"] = dict["matrix_start_year"]
new_dict["matrix_start_season_id"] = dict["matrix_start_season"]
new_dict["matrix_start_month"] = dict["matrix_start_month"]
new_dict["matrix_end_year"] = dict["matrix_end_year"]
new_dict["matrix_end_season_id"] = dict["matrix_end_season"]
new_dict["matrix_end_month"] = dict["matrix_end_month"]
new_dict["matrix_split"] = dict["matrix_split"]
new_dict["matrix_fec"] = dict["matrix_fec"]
new_dict["matrix_observations"]= dict["matrix_observations"]
new_dict["matrix_dimension"] = dict["matrix_dimension"]
new_dict["matrix_survival_issue"] = dict["matrix_survival_issue"]
new_dict["matrix_a_string"] = dict["matrix_a_string"]
new_dict["matrix_c_string"] = dict["matrix_c_string"]
new_dict["matrix_f_string"] = dict["matrix_f_string"]
new_dict["matrix_u_string"] = dict["matrix_u_string"]
new_dict["matrix_class_organized"] = dict["matrix_class_organized"]
new_dict["matrix_class_author"] = dict["matrix_class_author"]
new_dict["matrix_class_number"] = dict["matrix_class_number"]
new_dict["matrix_vectors_includes_na"] = dict["matrix_vectors_includes_na"]
#new_dict["population_pop_size"] = dict["population_pop_size"]
new_dict["species_iucn_status_id"] = dict["species_iucn_status"]
new_dict["publication_date_digitization"] = dict["publication_date_digitization"]
# new_dict["species_esa_status_id"] = dict["species_esa_status"]
new_dict["population_invasive_status_study_id"] = dict["population_invasive_status_study"]
new_dict["population_invasive_status_elsewhere_id"] = dict["population_invasive_status_elsewhere"]
new_dict["study_purpose_endangered_id"] = dict["study_purpose_endangered"]
new_dict["study_purpose_weed_id"] = dict["study_purpose_weed"]
new_dict["trait_spand_ex_growth_type_id"] = dict["trait_spand_ex_growth_type"]
new_dict["trait_growth_form_raunkiaer_id"] = dict["trait_growth_form_raunkiaer"]
new_dict["fixed_census_timing_id"] = dict["fixed_census_timing"]
new_dict["fixed_small_id"] = dict["fixed_small"]
new_dict["fixed_seed_stage_error"] = dict["fixed_seed_stage_error"]
new_dict["species_gbif_taxon_key"] = dict["species_gbif_taxon_key"]
#new_dict["version_checked"] = dict["matrix_checked"] #column not in scv?
new_dict["version_checked_count"] = dict["matrix_checked_count"]
new_dict["taxonomy_genus_accepted"] = dict["taxonomy_genus_accepted"]
new_dict["matrix_independent"] = dict["matrix_independent"]
new_dict["matrix_non_independence"] = dict["matrix_non_independence"]
new_dict["matrix_non_independence_author"] = dict["matrix_non_independence_author"]
new_dict["matrix_difficulty"] = dict["matrix_difficulty"]
new_dict["matrix_complete"] = dict["matrix_complete"]
new_dict["matrix_seasonal"] = dict["matrix_seasonal"]
#new_dict["database_master_version"] = dict["database_master_version"]
new_dict["population_database_id"] = dict["database_master_version"]
#new_dict["database_date_created"] = dict["database_date_created"]
#new_dict["database_number_species_accepted"] = dict["database_number_species_accepted"]
#new_dict["database_number_studies"] = dict["database_number_studies"]
#new_dict["database_number_matrices"] = dict["database_number_matrices"]
#new_dict["database_agreement"] = dict["database_agreement"]
new_dict["taxonomy_col_check_ok"] = dict["taxonomy_col_check_ok"]
new_dict["taxonomy_col_check_date"]= dict["taxonomy_col_check_date"]
new_dict["matrix_independence_origin"] = dict["matrix_independence_origin"]
new_dict['image_path'] = dict["image_path"]
new_dict['image_path2'] = dict["image_path2"]
new_dict['species_iucn_taxonid'] = dict["species_iucn_taxonid"]
# correspondence
new_dict['publication_corresponding_author'] = dict["publication_corresponding_author"]
new_dict['publication_corresponding_email'] = dict["publication_corresponding_email"]
new_dict['date_author_contacted'] = dict["date_author_contacted"]
new_dict['date_author_contacted_again'] = dict["date_author_contacted_again"]
new_dict['correspondence_email_content'] = dict["correspondence_email_content"] # what was missing from publication (asked for)
new_dict['correspondence_author_reply'] = dict["correspondence_author_reply"] # did they reply?
new_dict['publication_student'] = dict["publication_student"] #who asked for it
new_dict['extra_content_email'] = dict["extra_content_email"] # extra information asked for
new_dict['publication_missing_data'] = dict["publication_missing_data"] # attached to publication as a note about what is missing
new_dict['population_within_site_replication'] = dict["within_site_replication"]
new_dict['study_database_source_id'] = dict["study_database_source"]
new_dict['publication_study_notes'] = dict["publication_study_notes"]
new_dict['publications_protocol_id'] = dict["publications_protocol"]
new_dict['digitization_protocol_id'] = dict["digitization_protocol"]
new_dict['commonterm_id'] = dict["commonterm"]
for key, value in new_dict.iteritems():
if value == "NA":
new_dict[key] = None
if value == "":
new_dict[key] = None
if value == "None":
new_dict[key] = None
if value == "NC":
new_dict[key] = None
if value == ".":
new_dict[key] = None
if value == "AFI":
new_dict[key] = 'NDY'
return new_dict
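# Editor's note (sketch): the loop above normalises the migration CSV's
# missing-value codes ("NA", "", "None", "NC", ".") to None and maps "AFI" to
# 'NDY'. The same mapping can be written more compactly as below; this helper is
# illustrative only and is not called elsewhere in this file.
def _normalise_missing_values(row):
    """Apply the same sentinel-value normalisation as convert_all_headers_new."""
    sentinels = {"NA", "", "None", "NC", "."}
    return {key: (None if value in sentinels
                  else 'NDY' if value == "AFI"
                  else value)
            for key, value in row.items()}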
@manager.command
def migrate_meta():
from app.models import User, Role, Permission, \
IUCNStatus, OrganismType, GrowthFormRaunkiaer, ReproductiveRepetition, \
DicotMonoc, AngioGymno, SpandExGrowthType, SourceType, Database, Purpose, MissingData, ContentEmail, Ecoregion, Continent, InvasiveStatusStudy, InvasiveStatusElsewhere, StageTypeClass, \
TransitionType, MatrixComposition, StartSeason, StudiedSex, Captivity, Species, Taxonomy, Trait, \
Publication, AuthorContact, AdditionalSource, Population, Stage, StageType, Treatment, \
MatrixStage, MatrixValue, Matrix, Interval, Fixed, Small, CensusTiming, PurposeEndangered, PurposeWeed, Institute, Version, \
PublicationsProtocol, DigitizationProtocol, Protocol, CommonTerm
print "Migrating Meta Tables..."
Role.insert_roles()
Species.migrate()
Taxonomy.migrate()
Trait.migrate()
Publication.migrate()
AuthorContact.migrate()
Population.migrate()
StageType.migrate()
MatrixValue.migrate()
Matrix.migrate()
Fixed.migrate()
Version.migrate()
Institute.migrate()
User.migrate()
Database.migrate()
Status.migrate()
PublicationsProtocol.migrate()
DigitizationProtocol.migrate()
CommonTerm.migrate()
return
def model_version(model):
count = model.query.count()
for x in range(count):
y = model.query.get(x+1)
y.version_latest = 1
y.version_original = 1
y.version_ok = 1
db.session.add(y)
db.session.commit()
@manager.command
def version_current():
models = [Species(), Taxonomy(), Trait(), Publication(), AuthorContact(), Population(), StageType(), MatrixValue(),Matrix(), Fixed(), Institute(), Protocol()]
for model in models:
model_version(model)
@manager.command
def deploy():
"""Run deployment tasks."""
from flask.ext.migrate import upgrade, migrate, init
from app.models import User, Role, Permission
print "Migrating models to database"
init()
migrate()
upgrade()
migrate()
print "Models migrated to database"
print "Migrating meta data to tables"
migrate_meta()
print "Meta tables migrated"
print "Initial migration of our current version of database..."
# migrate_comadre()
migrate_all()
if __name__ == '__main__':
manager.run()
|
Spandex-at-Exeter/demography_database
|
manage.py
|
Python
|
mit
| 43,949
|
[
"Amber"
] |
bec16371db65d2b4a3880d360d4117d0997987c743cda5b69ca320b408d01805
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from . import ClusteringMethod
from . import ClusterCollection
__all__ = [
'ClusterCollection.ClusterCollection',
'ClusterCollection.Cluster',
'ClusteringMethod.AffinityPropagationNative',
'ClusteringMethod.AffinityPropagation',
'ClusteringMethod.DBSCAN']
|
alejob/mdanalysis
|
package/MDAnalysis/analysis/encore/clustering/__init__.py
|
Python
|
gpl-2.0
| 1,294
|
[
"MDAnalysis"
] |
1d40f140821af6774d09d500f59e664032048fb49643e3600dbb2bbdb37da0b4
|
# -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Scientific Package. This package holds all simulators, and
# analysers necessary to run brain-simulations. You can use it stand alone or
# in conjunction with TheVirtualBrain-Framework Package. See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
"""
Jansen-Rit and derivative models.
"""
from .base import ModelNumbaDfun, Model, LOG, numpy, basic, arrays
import math
from numba import guvectorize, float64
class JansenRit(ModelNumbaDfun):
r"""
The Jansen and Rit model is a biologically inspired mathematical framework
originally conceived to simulate the spontaneous electrical activity of
neuronal assemblies, with a particular focus on alpha activity, for instance,
as measured by EEG. Later on, it was discovered that in addition to alpha
activity, this model was also able to simulate evoked potentials.
.. [JR_1995] Jansen, B., H. and Rit V., G., *Electroencephalogram and
visual evoked potential generation in a mathematical model of
coupled cortical columns*, Biological Cybernetics (73) 357:366, 1995.
.. [J_1993] Jansen, B., Zouridakis, G. and Brandt, M., *A
neurophysiologically-based mathematical model of flash visual evoked
potentials*
.. figure :: img/JansenRit_45_mode_0_pplane.svg
:alt: Jansen and Rit phase plane (y4, y5)
The (:math:`y_4`, :math:`y_5`) phase-plane for the Jansen and Rit model.
.. automethod:: JansenRit.__init__
The dynamic equations were taken from [JR_1995]_
.. math::
\dot{y_0} &= y_3 \\
\dot{y_3} &= A a\,S[y_1 - y_2] - 2a\,y_3 - 2a^2\, y_0 \\
\dot{y_1} &= y_4\\
\dot{y_4} &= A a \,[p(t) + \alpha_2 J + S[\alpha_1 J\,y_0]+ c_0]
-2a\,y - a^2\,y_1 \\
\dot{y_2} &= y_5 \\
\dot{y_5} &= B b (\alpha_4 J\, S[\alpha_3 J \,y_0]) - 2 b\, y_5
- b^2\,y_2 \\
S[v] &= \frac{2\, \nu_{max}}{1 + \exp^{r(v_0 - v)}}
"""
_ui_name = "Jansen-Rit"
ui_configurable_parameters = ['A', 'B', 'a', 'b', 'v0', 'nu_max', 'r', 'J',
'a_1', 'a_2', 'a_3', 'a_4', 'p_min', 'p_max',
'mu']
#Define traited attributes for this model, these represent possible kwargs.
A = arrays.FloatArray(
label=":math:`A`",
default=numpy.array([3.25]),
range=basic.Range(lo=2.6, hi=9.75, step=0.05),
doc="""Maximum amplitude of EPSP [mV]. Also called average synaptic gain.""",
order=1)
B = arrays.FloatArray(
label=":math:`B`",
default=numpy.array([22.0]),
range=basic.Range(lo=17.6, hi=110.0, step=0.2),
doc="""Maximum amplitude of IPSP [mV]. Also called average synaptic gain.""",
order=2)
a = arrays.FloatArray(
label=":math:`a`",
default=numpy.array([0.1]),
range=basic.Range(lo=0.05, hi=0.15, step=0.01),
doc="""Reciprocal of the time constant of passive membrane and all
other spatially distributed delays in the dendritic network [ms^-1].
Also called average synaptic time constant.""",
order=3)
b = arrays.FloatArray(
label=":math:`b`",
default=numpy.array([0.05]),
range=basic.Range(lo=0.025, hi=0.075, step=0.005),
doc="""Reciprocal of the time constant of passive membrane and all
other spatially distributed delays in the dendritic network [ms^-1].
Also called average synaptic time constant.""",
order=4)
v0 = arrays.FloatArray(
label=":math:`v_0`",
default=numpy.array([5.52]),
range=basic.Range(lo=3.12, hi=6.0, step=0.02),
doc="""Firing threshold (PSP) for which a 50% firing rate is achieved.
In other words, it is the value of the average membrane potential
corresponding to the inflection point of the sigmoid [mV].
The usual value for this parameter is 6.0.""",
order=5)
nu_max = arrays.FloatArray(
label=r":math:`\nu_{max}`",
default=numpy.array([0.0025]),
range=basic.Range(lo=0.00125, hi=0.00375, step=0.00001),
doc="""Determines the maximum firing rate of the neural population
[s^-1].""",
order=6)
r = arrays.FloatArray(
label=":math:`r`",
default=numpy.array([0.56]),
range=basic.Range(lo=0.28, hi=0.84, step=0.01),
doc="""Steepness of the sigmoidal transformation [mV^-1].""",
order=7)
J = arrays.FloatArray(
label=":math:`J`",
default=numpy.array([135.0]),
range=basic.Range(lo=65.0, hi=1350.0, step=1.),
doc="""Average number of synapses between populations.""",
order=8)
a_1 = arrays.FloatArray(
label=r":math:`\alpha_1`",
default=numpy.array([1.0]),
range=basic.Range(lo=0.5, hi=1.5, step=0.1),
doc="""Average probability of synaptic contacts in the feedback
excitatory loop.""",
order=9)
a_2 = arrays.FloatArray(
label=r":math:`\alpha_2`",
default=numpy.array([0.8]),
range=basic.Range(lo=0.4, hi=1.2, step=0.1),
doc="""Average probability of synaptic contacts in the feedback
excitatory loop.""",
order=10)
a_3 = arrays.FloatArray(
label=r":math:`\alpha_3`",
default=numpy.array([0.25]),
range=basic.Range(lo=0.125, hi=0.375, step=0.005),
doc="""Average probability of synaptic contacts in the feedback
excitatory loop.""",
order=11)
a_4 = arrays.FloatArray(
label=r":math:`\alpha_4`",
default=numpy.array([0.25]),
range=basic.Range(lo=0.125, hi=0.375, step=0.005),
doc="""Average probability of synaptic contacts in the slow feedback
inhibitory loop.""",
order=12)
p_min = arrays.FloatArray(
label=":math:`p_{min}`",
default=numpy.array([0.12]),
range=basic.Range(lo=0.0, hi=0.12, step=0.01),
doc="""Minimum input firing rate.""",
order=13)
p_max = arrays.FloatArray(
label=":math:`p_{max}`",
default=numpy.array([0.32]),
range=basic.Range(lo=0.0, hi=0.32, step=0.01),
doc="""Maximum input firing rate.""",
order=14)
mu = arrays.FloatArray(
label=r":math:`\mu_{max}`",
default=numpy.array([0.22]),
range=basic.Range(lo=0.0, hi=0.22, step=0.01),
doc="""Mean input firing rate""",
order=15)
#Used for phase-plane axis ranges and to bound random initial() conditions.
state_variable_range = basic.Dict(
label="State Variable ranges [lo, hi]",
default={"y0": numpy.array([-1.0, 1.0]),
"y1": numpy.array([-500.0, 500.0]),
"y2": numpy.array([-50.0, 50.0]),
"y3": numpy.array([-6.0, 6.0]),
"y4": numpy.array([-20.0, 20.0]),
"y5": numpy.array([-500.0, 500.0])},
doc="""The values for each state-variable should be set to encompass
the expected dynamic range of that state-variable for the current
parameters; it is used as a mechanism for bounding random initial
conditions when the simulation isn't started from an explicit history,
and it also provides the default range of phase-plane plots.""",
order=16)
variables_of_interest = basic.Enumerate(
label="Variables watched by Monitors",
options=["y0", "y1", "y2", "y3", "y4", "y5"],
default=["y0", "y1", "y2", "y3"],
select_multiple=True,
doc="""This represents the default state-variables of this Model to be
monitored. It can be overridden for each Monitor if desired. The
corresponding state-variable indices for this model are :math:`y0 = 0`,
:math:`y1 = 1`, :math:`y2 = 2`, :math:`y3 = 3`, :math:`y4 = 4`, and
:math:`y5 = 5`""",
order=17)
# variables_of_interest = arrays.IntegerArray(
# label = "Variables watched by Monitors",
# range = basic.Range(lo = 0.0, hi = 6.0, step = 1.0),
# default = numpy.array([0, 3], dtype=numpy.int32),
# doc = """This represents the default state-variables of this Model to be
# monitored. It can be overridden for each Monitor if desired. The
# corresponding state-variable indices for this model are :math:`y0 = 0`,
# :math:`y1 = 1`, :math:`y2 = 2`, :math:`y3 = 3`, :math:`y4 = 4`, and
# :math:`y5 = 5`""",
# order = 17)
state_variables = 'y0 y1 y2 y3 y4 y5'.split()
_nvar = 6
cvar = numpy.array([1, 2], dtype=numpy.int32)
def _numpy_dfun(self, state_variables, coupling, local_coupling=0.0):
r"""
The dynamic equations were taken from [JR_1995]_
.. math::
\dot{y_0} &= y_3 \\
\dot{y_3} &= A a\,S[y_1 - y_2] - 2a\,y_3 - 2a^2\, y_0 \\
\dot{y_1} &= y_4\\
\dot{y_4} &= A a \,[p(t) + \alpha_2 J S[\alpha_1 J\,y_0]+ c_0]
-2a\,y - a^2\,y_1 \\
\dot{y_2} &= y_5 \\
\dot{y_5} &= B b (\alpha_4 J\, S[\alpha_3 J \,y_0]) - 2 b\, y_5
- b^2\,y_2 \\
S[v] &= \frac{2\, \nu_{max}}{1 + \exp^{r(v_0 - v)}}
:math:`p(t)` can be any arbitrary function, including white noise or
random numbers taken from a uniform distribution, representing a pulse
density with an amplitude varying between 120 and 320.
For evoked potentials, a transient component of the input,
representing the impulse density attributable to a brief visual input,
is applied. Time should be in seconds.
.. math::
p(t) = q\,(\frac{t}{w})^n \, \exp{-\frac{t}{w}} \\
q = 0.5 \\
n = 7 \\
w = 0.005 [s]
"""
y0, y1, y2, y3, y4, y5 = state_variables
# NOTE: This is assumed to be \sum_j u_kj * S[y_{1_j} - y_{2_j}]
lrc = coupling[0, :]
short_range_coupling = local_coupling*(y1 - y2)
# NOTE: for local couplings
# 0: pyramidal cells
# 1: excitatory interneurons
# 2: inhibitory interneurons
# 0 -> 1,
# 0 -> 2,
# 1 -> 0,
# 2 -> 0,
exp = numpy.exp
sigm_y1_y2 = 2.0 * self.nu_max / (1.0 + exp(self.r * (self.v0 - (y1 - y2))))
sigm_y0_1 = 2.0 * self.nu_max / (1.0 + exp(self.r * (self.v0 - (self.a_1 * self.J * y0))))
sigm_y0_3 = 2.0 * self.nu_max / (1.0 + exp(self.r * (self.v0 - (self.a_3 * self.J * y0))))
return numpy.array([
y3,
y4,
y5,
self.A * self.a * sigm_y1_y2 - 2.0 * self.a * y3 - self.a ** 2 * y0,
self.A * self.a * (self.mu + self.a_2 * self.J * sigm_y0_1 + lrc + short_range_coupling)
- 2.0 * self.a * y4 - self.a ** 2 * y1,
self.B * self.b * (self.a_4 * self.J * sigm_y0_3) - 2.0 * self.b * y5 - self.b ** 2 * y2,
])
def dfun(self, y, c, local_coupling=0.0):
src = local_coupling*(y[1] - y[2])[:, 0]
y_ = y.reshape(y.shape[:-1]).T
c_ = c.reshape(c.shape[:-1]).T
deriv = _numba_dfun_jr(y_, c_, src,
self.nu_max, self.r, self.v0, self.a, self.a_1, self.a_2, self.a_3, self.a_4,
self.A, self.b, self.B, self.J, self.mu
)
return deriv.T[..., numpy.newaxis]
@guvectorize([(float64[:],) * 17], '(n),(m)' + ',()'*14 + '->(n)', nopython=True)
def _numba_dfun_jr(y, c,
src,
nu_max, r, v0, a, a_1, a_2, a_3, a_4, A, b, B, J, mu,
dx):
sigm_y1_y2 = 2.0 * nu_max[0] / (1.0 + math.exp(r[0] * (v0[0] - (y[1] - y[2]))))
sigm_y0_1 = 2.0 * nu_max[0] / (1.0 + math.exp(r[0] * (v0[0] - (a_1[0] * J[0] * y[0]))))
sigm_y0_3 = 2.0 * nu_max[0] / (1.0 + math.exp(r[0] * (v0[0] - (a_3[0] * J[0] * y[0]))))
dx[0] = y[3]
dx[1] = y[4]
dx[2] = y[5]
dx[3] = A[0] * a[0] * sigm_y1_y2 - 2.0 * a[0] * y[3] - a[0] ** 2 * y[0]
dx[4] = A[0] * a[0] * (mu[0] + a_2[0] * J[0] * sigm_y0_1 + c[0] + src[0]) - 2.0 * a[0] * y[4] - a[0] ** 2 * y[1]
dx[5] = B[0] * b[0] * (a_4[0] * J[0] * sigm_y0_3) - 2.0 * b[0] * y[5] - b[0] ** 2 * y[2]
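# Editor's note (sketch): the docstring of JansenRit._numpy_dfun describes the
# evoked-potential input p(t) = q * (t/w)**n * exp(-t/w) with q = 0.5, n = 7 and
# w = 0.005 s. The helper below is an illustrative, self-contained evaluation of
# that pulse; it is not part of the TVB API and is not used by the model code.
def _example_transient_input(t, q=0.5, n=7, w=0.005):
    """Evaluate the transient input pulse p(t) from the JansenRit docstring (t in seconds)."""
    t = numpy.asarray(t, dtype=float)
    return q * (t / w) ** n * numpy.exp(-t / w)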
class ZetterbergJansen(Model):
"""
Zetterberg et al derived a model inspired by the Wilson-Cowan equations. It served as a basis for the later,
better known Jansen-Rit model.
.. [ZL_1978] Zetterberg LH, Kristiansson L and Mossberg K. Performance of a Model for a Local Neuron Population.
Biological Cybernetics 31, 15-26, 1978.
.. [JB_1995] Jansen, B., H. and Rit V., G., *Electroencephalogram and
visual evoked potential generation in a mathematical model of
coupled cortical columns*, Biological Cybernetics (73) 357:366, 1995.
.. [JB_1993] Jansen, B., Zouridakis, G. and Brandt, M., *A
neurophysiologically-based mathematical model of flash visual evoked
potentials*
.. [M_2007] Moran
.. [S_2010] Spiegler
.. [A_2012] Auburn
.. figure :: img/ZetterbergJansen_01_mode_0_pplane.svg
:alt: Jansen and Rit phase plane
"""
_ui_name = "Zetterberg-Jansen"
ui_configurable_parameters = ['He', 'Hi', 'ke', 'ki', 'e0', 'rho_2', 'rho_1', 'gamma_1',
'gamma_2', 'gamma_3', 'gamma_4', 'gamma_5', 'P', 'U', 'Q']
#Define traited attributes for this model, these represent possible kwargs.
He = arrays.FloatArray(
label=":math:`H_e`",
default=numpy.array([3.25]),
range=basic.Range(lo=2.6, hi=9.75, step=0.05),
doc="""Maximum amplitude of EPSP [mV]. Also called average synaptic gain.""",
order=1)
Hi = arrays.FloatArray(
label=":math:`H_i`",
default=numpy.array([22.0]),
range=basic.Range(lo=17.6, hi=110.0, step=0.2),
doc="""Maximum amplitude of IPSP [mV]. Also called average synaptic gain.""",
order=2)
ke = arrays.FloatArray(
label=r":math:`\kappa_e`",
default=numpy.array([0.1]),
range=basic.Range(lo=0.05, hi=0.15, step=0.01),
doc="""Reciprocal of the time constant of passive membrane and all
other spatially distributed delays in the dendritic network [ms^-1].
Also called average synaptic time constant.""",
order=3)
ki = arrays.FloatArray(
label=r":math:`\kappa_i`",
default=numpy.array([0.05]),
range=basic.Range(lo=0.025, hi=0.075, step=0.005),
doc="""Reciprocal of the time constant of passive membrane and all
other spatially distributed delays in the dendritic network [ms^-1].
Also called average synaptic time constant.""",
order=4)
e0 = arrays.FloatArray(
label=r":math:`e_0`",
default=numpy.array([0.0025]),
range=basic.Range(lo=0.00125, hi=0.00375, step=0.00001),
doc="""Half of the maximum population mean firing rate [ms^-1].""",
order=6)
rho_2 = arrays.FloatArray(
label=r":math:`\rho_2`",
default=numpy.array([6.0]),
range=basic.Range(lo=3.12, hi=10.0, step=0.02),
doc="""Firing threshold (PSP) for which a 50% firing rate is achieved.
In other words, it is the value of the average membrane potential
corresponding to the inflection point of the sigmoid [mV]. Population mean firing threshold.""",
order=5)
rho_1 = arrays.FloatArray(
label=r":math:`\rho_1`",
default=numpy.array([0.56]),
range=basic.Range(lo=0.28, hi=0.84, step=0.01),
doc="""Steepness of the sigmoidal transformation [mV^-1].""",
order=7)
gamma_1 = arrays.FloatArray(
label=r":math:`\gamma_1`",
default=numpy.array([135.0]),
range=basic.Range(lo=65.0, hi=1350.0, step=5.),
doc="""Average number of synapses between populations (pyramidal to stellate).""",
order=8)
gamma_2 = arrays.FloatArray(
label=r":math:`\gamma_2`",
default=numpy.array([108.]),
range=basic.Range(lo=0.0, hi=200, step=10.0),
doc="""Average number of synapses between populations (stellate to pyramidal).""",
order=9)
gamma_3 = arrays.FloatArray(
label=r":math:`\gamma_3`",
default=numpy.array([33.75]),
range=basic.Range(lo=0.0, hi=200, step=10.0),
doc="""Connectivity constant (pyramidal to interneurons)""",
order=10)
gamma_4 = arrays.FloatArray(
label=r":math:`\gamma_4`",
default=numpy.array([33.75]),
range=basic.Range(lo=0.0, hi=200, step=10.0),
doc="""Connectivity constant (interneurons to pyramidal)""",
order=11)
gamma_5 = arrays.FloatArray(
label=r":math:`\gamma_5`",
default=numpy.array([15]),
range=basic.Range(lo=0.0, hi=100, step=10.0),
doc="""Connectivity constant (interneurons to interneurons)""",
order=12)
gamma_1T = arrays.FloatArray(
label=r":math:`\gamma_{1T}`",
default=numpy.array([1.0]),
range=basic.Range(lo=0.0, hi=1000.0, step=5.),
doc="""Coupling factor from the extrinisic input to the spiny stellate population.""",
order=17)
gamma_3T = arrays.FloatArray(
label=r":math:`\gamma_{3T}`",
default=numpy.array([1.0]),
range=basic.Range(lo=0.0, hi=1000.0, step=5.),
doc="""Coupling factor from the extrinisic input to the pyramidal population.""",
order=18)
gamma_2T = arrays.FloatArray(
label=r":math:`\gamma_{2T}`",
default=numpy.array([1.0]),
range=basic.Range(lo=0.0, hi=1000.0, step=5.),
doc="""Coupling factor from the extrinisic input to the inhibitory population.""",
order=19)
P = arrays.FloatArray(
label=":math:`P`",
default=numpy.array([0.12]),
range=basic.Range(lo=0.0, hi=0.350, step=0.01),
doc="""Maximum firing rate to the pyramidal population [ms^-1].
(External stimulus. Constant intensity. Entry point for coupling.)""",
order=13)
U = arrays.FloatArray(
label=":math:`U`",
default=numpy.array([0.12]),
range=basic.Range(lo=0.0, hi=0.350, step=0.01),
doc="""Maximum firing rate to the stellate population [ms^-1].
(External stimulus. Constant intensity. Entry point for coupling.)""",
order=14)
Q = arrays.FloatArray(
label=":math:`Q`",
default=numpy.array([0.12]),
range=basic.Range(lo=0.0, hi=0.350, step=0.01),
doc="""Maximum firing rate to the interneurons population [ms^-1].
(External stimulus. Constant intensity. Entry point for coupling.)""",
order=15)
#Used for phase-plane axis ranges and to bound random initial() conditions.
state_variable_range = basic.Dict(
label="State Variable ranges [lo, hi]",
default={"v1": numpy.array([-100.0, 100.0]),
"y1": numpy.array([-500.0, 500.0]),
"v2": numpy.array([-100.0, 50.0]),
"y2": numpy.array([-100.0, 6.0]),
"v3": numpy.array([-100.0, 6.0]),
"y3": numpy.array([-100.0, 6.0]),
"v4": numpy.array([-100.0, 20.0]),
"y4": numpy.array([-100.0, 20.0]),
"v5": numpy.array([-100.0, 20.0]),
"y5": numpy.array([-500.0, 500.0]),
"v6": numpy.array([-100.0, 20.0]),
"v7": numpy.array([-100.0, 20.0]),},
doc="""The values for each state-variable should be set to encompass
the expected dynamic range of that state-variable for the current
parameters; it is used as a mechanism for bounding random initial
conditions when the simulation isn't started from an explicit history,
and it also provides the default range of phase-plane plots.""",
order=16)
variables_of_interest = basic.Enumerate(
label="Variables watched by Monitors",
options=["v1", "y1", "v2", "y2", "v3", "y3", "v4", "y4", "v5", "y5", "v6", "v7"],
default=["v6", "v7", "v2", "v3", "v4", "v5"],
select_multiple=True,
doc="""This represents the default state-variables of this Model to be
monitored. It can be overridden for each Monitor if desired. The
corresponding state-variable indices for this model are :math:`v_6 = 0`,
:math:`v_7 = 1`, :math:`v_2 = 2`, :math:`v_3 = 3`, :math:`v_4 = 4`, and
:math:`v_5 = 5`""",
order=42)
state_variables = 'v1 y1 v2 y2 v3 y3 v4 y4 v5 y5 v6 v7'.split()
_nvar = 12
cvar = numpy.array([10], dtype=numpy.int32)
Heke = None # self.He * self.ke
Hiki = None # self.Hi * self.ki
ke_2 = None # 2 * self.ke
ki_2 = None # 2 * self.ki
keke = None # self.ke **2
kiki = None # self.ki **2
def dfun(self, state_variables, coupling, local_coupling=0.0):
magic_exp_number = 709
v1 = state_variables[0, :]
y1 = state_variables[1, :]
v2 = state_variables[2, :]
y2 = state_variables[3, :]
v3 = state_variables[4, :]
y3 = state_variables[5, :]
v4 = state_variables[6, :]
y4 = state_variables[7, :]
v5 = state_variables[8, :]
y5 = state_variables[9, :]
v6 = state_variables[10, :]
v7 = state_variables[11, :]
derivative = numpy.empty_like(state_variables)
# NOTE: long_range_coupling term: coupling variable is v6 . EQUATIONS
# ASSUME linear coupling is used. 'coupled_input' represents a rate. It
# is very likely that coeffs gamma_xT should be independent for each of the
# terms considered as extrinsic input (P, Q, U) (long range coupling) (local coupling)
# and noise.
coupled_input = self.sigma_fun(coupling[0, :] + local_coupling * v6)
# exc input to the excitatory interneurons
derivative[0] = y1
derivative[1] = self.Heke * (self.gamma_1 * self.sigma_fun(v2 - v3) + self.gamma_1T * (self.U + coupled_input )) - self.ke_2 * y1 - self.keke * v1
# exc input to the pyramidal cells
derivative[2] = y2
derivative[3] = self.Heke * (self.gamma_2 * self.sigma_fun(v1) + self.gamma_2T * (self.P + coupled_input )) - self.ke_2 * y2 - self.keke * v2
# inh input to the pyramidal cells
derivative[4] = y3
derivative[5] = self.Hiki * (self.gamma_4 * self.sigma_fun(v4 - v5)) - self.ki_2 * y3 - self.kiki * v3
derivative[6] = y4
# exc input to the inhibitory interneurons
derivative[7] = self.Heke * (self.gamma_3 * self.sigma_fun(v2 - v3) + self.gamma_3T * (self.Q + coupled_input)) - self.ke_2 * y4 - self.keke * v4
derivative[8] = y5
# inh input to the inhibitory interneurons
derivative[9] = self.Hiki * (self.gamma_5 * self.sigma_fun(v4 - v5)) - self.ki_2 * y5 - self.kiki * v5
# aux variables (the sum gathering the postsynaptic inh & exc potentials)
# pyramidal cells
derivative[10] = y2 - y3
# inhibitory cells
derivative[11] = y4 - y5
return derivative
def sigma_fun(self, sv):
"""
Neuronal activation function. This sigmoidal function
increases from 0 to 2*e_0 as "sv" increases.
sv represents a membrane potential state variable (V).
"""
#HACKERY: guard for exponentials that blow up.
# Set to inf, so the result will be effectively zero.
magic_exp_number = 709
temp = self.rho_1 * (self.rho_2 - sv)
temp = numpy.where(temp > magic_exp_number, numpy.inf, temp)
sigma_v = (2* self.e0) / (1 + numpy.exp(temp))
return sigma_v
def update_derived_parameters(self):
self.Heke = self.He * self.ke
self.Hiki = self.Hi * self.ki
self.ke_2 = 2 * self.ke
self.ki_2 = 2 * self.ki
self.keke = self.ke**2
self.kiki = self.ki**2
|
stuart-knock/tvb-library
|
tvb/simulator/models/jansen_rit.py
|
Python
|
gpl-2.0
| 25,472
|
[
"NEURON"
] |
f5fd48ea59de2efedd3557e42082c64aded95bb1026fa1b4e3c95a2592978c9f
|
#-*- coding: utf-8 -*-
'''
Created on 25 March 2014
Toolbox for downloading ERA-Interim parameters, given an ECMWF parameter code,
a shapefile or an extent for the area, the period needed,
and an optional output folder for the downloaded rasters.
@author: yoann Moreau
@author: benjamin tardy
'''
import sys
import getopt
import os
#from netCDF4 import Dataset
import gdal
import osr
import numpy
import utils as utils
from ecmwfapi import ECMWFDataServer
def main(argv):
try:
opts,argv = getopt.getopt(argv,":h:i:e:s:o:c:E:t:p:g:P:m:",['help','[outFile]','code','[shapeFile]','start','end','[tr]'])
except getopt.GetoptError:
print 'error in parameter for eraInterimDownload. type eraInterimDownload.py -help for more detail on use '
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'eraInterimDownload.py '
print ' [mandatory] : ',
print ' --code <EraInterimCode>'
print ' --init <dateStart YYYY-MM-DD>'
print ' --end <dateEnd YY-MM-DD>'
print ' --shapefile <shapefile> OR -Extend < xmin,ymax,xmax,ymin>'
print ' [optional] :'
print ' --time <EraInterim Time> (default 00)'
print ' --step <EraInterim Step> (default 3,6,9,12)'
print ' --grid <EraInterim Time> (default 0.75)'
print ' --outfile <outfolder> (default /home/user/eraInterim)'
print ' --proxy <proxy : True/False> (default False)'
print ' --mode <mode : analyse/forcast> (default analyse)'
print ''
print 'EXAMPLES'
print '--temperature on a shapefile'
print 'python eraInterimDownload.py -c 167 -i 2014-01-01 -e 2014-01-02 -s PATH_TO_SHAPE'
print '--pressure on a area'
print 'python eraInterimDownload.py -c 134 -i 2014-01-01 -e 2014-01-02 -E xmin,ymax,xmax,ymin'
print ''
print ' CODE PARAMETERS'
print ''
print 'total precipitation : 228 [m of water]'
print '2 metre temperature : 167 [K]'
print 'maximum 2m temperature since last post-processing step : 201 [K]'
print 'minimum 2m temperature since last post-processing step : 202 [K]'
print 'surface pressure : 134 [Pa]'
print '2 metre dewpoint : 168 [K]'
print '10 metre eastward wind component : 165 [m s-1]'
print '10 metre northward wind component : 166 [m s-1]'
print '...'
print 'see http://old.ecmwf.int/publications/library/ecpublications/_pdf/era/era_report_series/RS_1_v2.pdf for more references'
sys.exit()
elif opt in ('-o','--outFolder'):
oFolder = arg
elif opt in ('-c','--code'):
codeEra = arg.split(',')
elif opt in ('-i','--start'):
startDate = arg
elif opt in ('-e','--end'):
endDate = arg
elif opt in ('-s','--shapefile'):
pathToShapefile = arg
elif opt in ('-E','--tr'):
extend = arg.split(',')
elif opt in ('-t','--time'):
time = arg.split(',')
elif opt in ('-g','--grid'):
grid = arg
elif opt in ('-p','--step'):
step = arg.split(',')
elif opt in ('-P','--proxy'):
proxy = arg
elif opt in ('-m','--mode'):
mode = arg
if len(sys.argv) < 8:
print 'eraInterimDownload.py'
print ' -c <EraInterimCode> -list possible-'
print ' -i <dateStart YYYY-MM-DD> '
print ' -e <dateEnd YY-MM-DD>'
print ' -s <shapefile> '
print ' or'
print ' -E < xmin,ymax,xmax,ymin>]'
print ''
print ' [-t <eraInterim time parameters in 00/06/12/18> (default 00,12)] -list possible-'
print ' [-g <size of grid in 0.125/0.25/0.5/0.75/1.125/1.5/2/2.5/3> (default 0.75)]'
print ' [-p <eraInterim step parameter in 00/03/06/12> default 3,6,9,12] -list possible-'
print ' [-o <outfolder> (default /home/user/eraInterim)]'
print ' [-P <proxy> (default False)]'
print ''
print 'For help on interimCode -help'
sys.exit(2)
try:
oFolder
except NameError:
oFolder = os.path.expanduser('~')
oFolder = oFolder + '/eraInterim'
print "output folder not precised : downloaded eraInterim images on "+oFolder
# verification du folder/or creation if not exists
utils.checkForFolder(oFolder)
try:
codeEra
except NameError:
exit ('no parameter code specified. Please give the eraInterim parameter code(s) you wish to download')
try:
startDate
except NameError:
exit ('start date not specified')
# check that startDate is a valid date
startDate=utils.checkForDate(startDate)
try:
endDate
except NameError:
exit ('end Date not specified')
# check that endDate is a valid date
endDate=utils.checkForDate(endDate)
try:
pathToShapefile
except NameError:
try:
extend
except NameError:
exit ('no area of interest has been specified. Please use -s (shapefile) or -E (extent) to declare it')
if 'pathToShapefile' in locals():
extendArea=utils.convertShpToExtend(pathToShapefile)
else:
extendArea=extend
extendArea=utils.checkForExtendValidity(extendArea)
try:
time
except NameError:
time=['00','12']
time=utils.checkForTimeValidity(time)
try:
grid
except NameError:
grid='0.75'
grid=utils.checkForGridValidity(grid)
try:
step
except NameError:
step=[3,6,9,12]
step=utils.checkForStepValidity(step)
try:
proxy
except NameError:
proxy=False
try:
mode
except NameError:
mode='analyse'
#Proxy parameters needed
if(proxy):
login = raw_input('proxy login : ')
pwd = raw_input('proxy password : ')
site = raw_input('site (surf.cnes.fr) : ')
os.environ["http_proxy"] = "http://%s:%s@%s:8050"%(login,pwd,site)
os.environ["https_proxy"] = "http://%s:%s@%s:8050"%(login,pwd,site)
#Create param if first Time
if (not utils.checkForFile(os.path.expanduser('~')+'/.ecmwfapirc')):
print ('for the first connection you have to define your key and password on ECMWF')
print ('cf https://apps.ecmwf.int/auth/login/')
print ('')
u = raw_input('user (mail) : ')
k = raw_input('keys : ')
utils.createParamFile(os.path.expanduser('~')+'/.ecmwfapirc',u,k)
#Download NETCDF
server = ECMWFDataServer()
outNETCDFFile=oFolder+'/'+"/".join([str(x) for x in codeEra])+'_'+startDate.strftime('%Y%m%d')+'_'+endDate.strftime('%Y%m%d')+'.nc'
struct=utils.create_request_sfc(startDate, endDate, time, step, grid, extendArea, codeEra,outNETCDFFile,mode)
if len(struct[0])==0:
exit()
else:
for i in struct[0]:
try :
server.retrieve(i)
except:
print("---")
exit('Error in EraInterim server')
if struct[1] is not None:
print ("")
print ("--------------------------------------------------")
print ("")
print ("Some parameters couldn't been downloaded in %s mode :" % mode + ' '+ struct[1] )
print ("They have been downloaded in %s mode" % struct[2] )
utils.convertNETCDFtoTIF(outNETCDFFile, oFolder+'/tmp.tif')
shape=utils.getShape(outNETCDFFile)
if ('pathToShapefile' in locals()):
utils.reprojRaster(oFolder+'/tmp.tif',outNETCDFFile.rsplit('.')[0]+'.tif',shape,pathToShapefile)
else:
utils.reprojRaster(oFolder+'/tmp.tif',outNETCDFFile.rsplit('.')[0]+'.tif',shape)
os.remove(oFolder+'/tmp.tif')
os.remove(outNETCDFFile)
if __name__ == '__main__':
main(sys.argv[1:])
pass
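# A minimal command-line sketch (illustrative values only): the flags follow the usage
# text printed above, 167.128 is assumed here to be the ERA-Interim 2m-temperature
# parameter code, and the bounding box follows the -E ordering xmin,ymax,xmax,ymin.
#
#   python eraInterimDownload.py -c 167.128 -i 2015-01-01 -e 2015-01-31 \
#       -E -5.0,45.0,5.0,40.0 -t 00,12 -g 0.75 -o /tmp/eraInterim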
|
yoannMoreau/gfsDownload
|
python/eraInterimDownload.py
|
Python
|
cc0-1.0
| 8,200
|
[
"NetCDF"
] |
4ebc38cbade34b1d1fbaadb618f4ac16cbf4854b3270592b161a824c084fa13c
|
from typing import Sequence, Union, Any
from collections import OrderedDict
from numpy import Inf, exp
import pandas as pd
from hbayesdm.base import TaskModel
from hbayesdm.preprocess_funcs import prl_preprocess_func
__all__ = ['prl_rp']
class PrlRp(TaskModel):
def __init__(self, **kwargs):
super().__init__(
task_name='prl',
model_name='rp',
model_type='',
data_columns=(
'subjID',
'choice',
'outcome',
),
parameters=OrderedDict([
('Apun', (0, 0.1, 1)),
('Arew', (0, 0.1, 1)),
('beta', (0, 1, 10)),
]),
regressors=OrderedDict([
('ev_c', 2),
('ev_nc', 2),
('pe', 2),
]),
postpreds=['y_pred'],
parameters_desc=OrderedDict([
('Apun', 'punishment learning rate'),
('Arew', 'reward learning rate'),
('beta', 'inverse temperature'),
]),
additional_args_desc=OrderedDict([
]),
**kwargs,
)
_preprocess_func = prl_preprocess_func
def prl_rp(
data: Union[pd.DataFrame, str, None] = None,
niter: int = 4000,
nwarmup: int = 1000,
nchain: int = 4,
ncore: int = 1,
nthin: int = 1,
inits: Union[str, Sequence[float]] = 'vb',
ind_pars: str = 'mean',
model_regressor: bool = False,
vb: bool = False,
inc_postpred: bool = False,
adapt_delta: float = 0.95,
stepsize: float = 1,
max_treedepth: int = 10,
**additional_args: Any) -> TaskModel:
"""Probabilistic Reversal Learning Task - Reward-Punishment Model
Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning Task
using Reward-Punishment Model [Ouden2013]_ with the following parameters:
"Apun" (punishment learning rate), "Arew" (reward learning rate), "beta" (inverse temperature).
.. [Ouden2013] Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. (2013). Dissociable Effects of Dopamine and Serotonin on Reversal Learning. Neuron, 80(4), 1090-1100. https://doi.org/10.1016/j.neuron.2013.08.030
.. codeauthor:: Jaeyeong Yang (for model-based regressors) <jaeyeong.yang1125@gmail.com>
.. codeauthor:: Harhim Park (for model-based regressors) <hrpark12@gmail.com>
User data should contain the behavioral data-set of all subjects of interest for
the current analysis. When loading from a file, the datafile should be a
**tab-delimited** text file, whose rows represent trial-by-trial observations
and columns represent variables.
For the Probabilistic Reversal Learning Task, there should be 3 columns of data
with the labels "subjID", "choice", "outcome". It is not necessary for the columns to be
in this particular order; however, it is necessary that they be labeled
correctly and contain the information below:
- "subjID": A unique identifier for each subject in the data-set.
- "choice": Integer value representing the option chosen on that trial: 1 or 2.
- "outcome": Integer value representing the outcome of that trial (where reward == 1, and loss == -1).
.. note::
User data may contain other columns of data (e.g. ``ReactionTime``,
``trial_number``, etc.), but only the data within the column names listed
above will be used during the modeling. As long as the necessary columns
mentioned above are present and labeled correctly, there is no need to
remove other miscellaneous data columns.
.. note::
``adapt_delta``, ``stepsize``, and ``max_treedepth`` are advanced options that
give the user more control over Stan's MCMC sampler. It is recommended that
only advanced users change the default values, as alterations can profoundly
change the sampler's behavior. See [Hoffman2014]_ for more information on the
sampler control parameters. One can also refer to 'Section 34.2. HMC Algorithm
Parameters' of the `Stan User's Guide and Reference Manual`__.
.. [Hoffman2014]
Hoffman, M. D., & Gelman, A. (2014).
The No-U-Turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo.
Journal of Machine Learning Research, 15(1), 1593-1623.
__ https://mc-stan.org/users/documentation/
Parameters
----------
data
Data to be modeled. It should be given as a Pandas DataFrame object,
a filepath for a data file, or ``"example"`` for example data.
Data columns should be labeled as: "subjID", "choice", "outcome".
niter
Number of iterations, including warm-up. Defaults to 4000.
nwarmup
Number of iterations used for warm-up only. Defaults to 1000.
``nwarmup`` is a numerical value that specifies how many MCMC samples
should not be stored upon the beginning of each chain. For those
familiar with Bayesian methods, this is equivalent to burn-in samples.
Due to the nature of the MCMC algorithm, initial values (i.e., where the
sampling chains begin) can have a heavy influence on the generated
posterior distributions. The ``nwarmup`` argument can be set to a
higher number in order to curb the effects that initial values have on
the resulting posteriors.
nchain
Number of Markov chains to run. Defaults to 4.
``nchain`` is a numerical value that specifies how many chains (i.e.,
independent sampling sequences) should be used to draw samples from
the posterior distribution. Since the posteriors are generated from a
sampling process, it is good practice to run multiple chains to ensure
that a reasonably representative posterior is attained. When the
sampling is complete, it is possible to check the multiple chains for
convergence by running the following line of code:
.. code:: python
output.plot(type='trace')
ncore
Number of CPUs to be used for running. Defaults to 1.
nthin
Every ``nthin``-th sample will be used to generate the posterior
distribution. Defaults to 1. A higher number can be used when
auto-correlation within the MCMC sampling is high.
``nthin`` is a numerical value that specifies the "skipping" behavior
of the MCMC sampler. That is, only every ``nthin``-th sample is used to
generate posterior distributions. By default, ``nthin`` is equal to 1,
meaning that every sample is used to generate the posterior.
inits
String or list specifying how the initial values should be generated.
Options are ``'vb'`` (the default), ``'fixed'``, ``'random'``, or your own initial values.
ind_pars
String specifying how to summarize the individual parameters.
Current options are: ``'mean'``, ``'median'``, or ``'mode'``.
model_regressor
Whether to export model-based regressors. For this model they are: "ev_c", "ev_nc", "pe".
vb
Whether to use variational inference to approximately draw from a
posterior distribution. Defaults to ``False``.
inc_postpred
Include trial-level posterior predictive simulations in
model output (may greatly increase file size). Defaults to ``False``.
adapt_delta
Floating point value representing the target acceptance probability of a new
sample in the MCMC chain. Must be between 0 and 1. See note below.
stepsize
Integer value specifying the size of each leapfrog step that the MCMC sampler
can take on each new iteration. See note below.
max_treedepth
Integer value specifying how many leapfrog steps the MCMC sampler can take
on each new iteration. See note below.
**additional_args
Not used for this model.
Returns
-------
model_data
An ``hbayesdm.TaskModel`` instance with the following components:
- ``model``: String value that is the name of the model ('prl_rp').
- ``all_ind_pars``: Pandas DataFrame containing the summarized parameter values
(as specified by ``ind_pars``) for each subject.
- ``par_vals``: OrderedDict holding the posterior samples over different parameters.
- ``fit``: A PyStan StanFit object that contains the fitted Stan model.
- ``raw_data``: Pandas DataFrame containing the raw data used to fit the model,
as specified by the user.
- ``model_regressor``: Dict holding the extracted model-based regressors.
Examples
--------
.. code:: python
from hbayesdm import rhat, print_fit
from hbayesdm.models import prl_rp
# Run the model and store results in "output"
output = prl_rp(data='example', niter=2000, nwarmup=1000, nchain=4, ncore=4)
# Visually check convergence of the sampling chains (should look like "hairy caterpillars")
output.plot(type='trace')
# Plot posterior distributions of the hyper-parameters (distributions should be unimodal)
output.plot()
# Check Rhat values (all Rhat values should be less than or equal to 1.1)
rhat(output, less=1.1)
# Show the LOOIC and WAIC model fit estimates
print_fit(output)
"""
return PrlRp(
data=data,
niter=niter,
nwarmup=nwarmup,
nchain=nchain,
ncore=ncore,
nthin=nthin,
inits=inits,
ind_pars=ind_pars,
model_regressor=model_regressor,
vb=vb,
inc_postpred=inc_postpred,
adapt_delta=adapt_delta,
stepsize=stepsize,
max_treedepth=max_treedepth,
**additional_args)
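# A minimal sketch of reading model-based regressors back out of a fitted model; it assumes
# the keys of `model_regressor` match the regressor names declared above ('ev_c', 'ev_nc', 'pe'):
#
#   output = prl_rp(data='example', niter=2000, nwarmup=1000, model_regressor=True)
#   ev_chosen = output.model_regressor['ev_c']       # trial-by-trial value of the chosen option
#   prediction_error = output.model_regressor['pe']  # trial-by-trial prediction errors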
|
CCS-Lab/hBayesDM
|
Python/hbayesdm/models/_prl_rp.py
|
Python
|
gpl-3.0
| 9,952
|
[
"NEURON"
] |
2e0382bc1b93a3fc2d0a8dac50a98276e4d2dfc4a915688e44f7d49774161f7f
|
import os
import re
import sys
import six
import json
import shutil
from nose.tools import raises
from rabix.tests import mock_app_bad_repo, mock_app_good_repo, \
result_parallel_workflow, result_nested_workflow
from rabix.main import main
from rabix.docker import docker_client, get_image
__test__ = False
@raises(Exception)
def test_provide_image_bad_repo():
uri = mock_app_bad_repo["tool"]["requirements"]["environment"][
"container"]["uri"]
imageId = mock_app_bad_repo["tool"]["requirements"]["environment"][
"container"]["imageId"]
docker = docker_client()
get_image(docker, image_id=imageId, repo=uri)
def test_provide_image_good_repo():
uri = mock_app_good_repo["tool"]["requirements"]["environment"][
"container"]["uri"]
imageId = mock_app_good_repo["tool"]["requirements"]["environment"][
"container"]["imageId"]
docker = docker_client()
get_image(docker, image_id=imageId, repo=uri)
def test_expr_and_meta():
sys.argv = ['rabix', './rabix/tests/test-expr/bwa-mem.json',
'-i', './rabix/tests/test-cmdline/inputs.json',
'--dir', 'test1', '--']
main()
with open(os.path.abspath('./test1') + '/output.sam.rbx.json') as m:
meta = json.load(m)
assert meta['metadata']['expr_test'] == 'successful'
shutil.rmtree(os.path.abspath('./test1'))
sys.argv = ['rabix', '-i', './rabix/tests/test-cmdline/inputs.json',
'./rabix/tests/test-expr/bwa-mem.json',
'--dir', 'test2']
main()
with open(os.path.abspath('./test2') + '/output.sam.rbx.json') as m:
meta = json.load(m)
assert meta['metadata']['expr_test'] == 'successful'
shutil.rmtree(os.path.abspath('./test2'))
def test_fetch_remote_files():
sys.argv = ['rabix', '--dir', 'test_fetch_remote',
'./rabix/tests/test-cmdline/bwa-mem.json#tool', '--',
'--reads',
'https://s3.amazonaws.com/rabix/rabix-test/'
'example_human_Illumina.pe_1.fastq', '--reads',
'https://s3.amazonaws.com/rabix/rabix-test/'
'example_human_Illumina.pe_2.fastq', '--reference',
'./rabix/tests/test-files/chr20.fa']
main()
assert os.path.exists(os.path.abspath('./test_fetch_remote') + '/output.sam')
shutil.rmtree(os.path.abspath('./test_fetch_remote'))
def test_params_from_input_file():
sys.argv = ['rabix', '-i', 'rabix/tests/test-cmdline/inputs.json',
'rabix/tests/test-expr/bwa-mem.json',
'-d', 'test_from_input_file']
main()
assert os.path.exists(os.path.abspath('./test_from_input_file') + '/output.sam')
shutil.rmtree(os.path.abspath('./test_from_input_file'))
def test_override_input():
sys.argv = ['rabix', '-i', 'rabix/tests/test-cmdline/inputs.json', '-d',
'test_override_input', 'rabix/tests/test-expr/bwa-mem.json', '--',
'--reference', 'rabix/tests/test-files/chr20.fa']
main()
assert os.path.exists(os.path.abspath('./test_override_input') + '/output.sam')
shutil.rmtree(os.path.abspath('./test_override_input'))
def check_result(dir, res):
def compare_file(myfile, resfile):
for k, v in six.iteritems(myfile):
if k == 'path':
print(resfile)
assert re.match(resfile.get('path'), os.path.basename(v))
elif k == 'secondaryFiles':
compare_output(v, resfile.get('secondaryFiles'))
else:
assert v == resfile.get(k)
def compare_output(myoutput, resoutput):
if isinstance(myoutput, list):
for out in myoutput:
compare_file(out, resoutput)
else:
compare_file(myoutput, resoutput)
with open('/'.join([dir, 'result.cwl.json']), 'r') as f:
dct = json.load(f)
for k, v in six.iteritems(dct):
compare_output(v, res.get(k))
def test_parallelization():
'''
Testing implicit parallelization in workflows
'''
cwd = os.getcwd()
try:
os.mkdir('test_parralelization')
sys.argv = ['rabix', '../rabix/tests/test_workflows/parallelization_workflow.json',
'--', '--input', '../rabix/tests/test-files/chr20.fa']
os.chdir('./test_parralelization')
main()
dir = filter(lambda x: os.path.isdir(x) and 'index_file' in x, next(os.walk('.'))[1])
for d in dir:
check_result(d, result_parallel_workflow)
finally:
os.chdir(cwd)
shutil.rmtree(os.path.abspath('./test_parralelization'))
def test_nested_workflow():
'''
Testing nested workflows, inputs type directory and
tools which creates index files
'''
cwd = os.getcwd()
try:
os.mkdir('test_workflow')
sys.argv = ['rabix', '../rabix/tests/test_workflows/nested_workflow.json',
'--', '--input', '../rabix/tests/test-files/chr20.fa']
os.chdir('./test_workflow')
main()
dir = filter(lambda x: os.path.isdir(x) and 'index_file' in x,
next(os.walk('.'))[1])
for d in dir:
check_result(d, result_nested_workflow)
finally:
os.chdir(cwd)
shutil.rmtree(os.path.abspath('./test_workflow'))
|
lowks/rabix
|
rabix/tests/integration_test.py
|
Python
|
agpl-3.0
| 5,350
|
[
"BWA"
] |
1e8605f04247b7c5980b671562179d8ed42f7602f937bc53c174faf2fc282a5f
|
"""
DIRAC.DataManagementSystem.Client test package
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
yujikato/DIRAC
|
src/DIRAC/DataManagementSystem/Client/test/__init__.py
|
Python
|
gpl-3.0
| 167
|
[
"DIRAC"
] |
63585c9c2453881141f3fe3fae1b04931c057b01960633876057e93601fb18bb
|
# The MIT License
#
# Copyright (c) 2008 James Piechota
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import ns.bridge.data.Brain as Brain
class Node(object):
def __init__(self, genotype, node):
self._geno = genotype
self.node = node
def mutate(self):
self.mutateParameters()
self.mutateInputs()
def mutateInputs(self):
''' 1. reconnect inputs to other nodes
2. flip regular inputs to alt-inputs, and vice versa
'''
self.mutateConnections()
self.flipInputs()
def mutateConnections(self):
for i in range(len(self.node.inputs)):
if self.shouldMutate(self._geno.reconnectRate):
newSrc = self.reconnect(self.node.inputs[i])
self.node.inputs[i] = newSrc.id
for i in range(len(self.node.altInputs)):
if self.shouldMutate(self._geno.reconnectRate):
newSrc = self.reconnect(self.node.altInputs[i])
self.node.altInputs[i] = newSrc.id
def reconnect(self, oldSrcId):
''' Disconnect from oldSrc, and find a new source node to reconnect to.
'''
# notify the source node that it's losing an output connection
oldSrc = self._geno.getNode(oldSrcId).node
oldSrc.disconnect(self.node)
# find a random node, not ourself, to reconnect to
nodes = self._geno.getNodes()
while True:
index = self._geno.rand.randint(0, len(nodes)-1)
# notify the new source node that it's gaining an output
# connection
newSrc = nodes[index].node
if (newSrc != self.node and
newSrc != oldSrc and
newSrc.id not in self.node.inputs and
newSrc.id not in self.node.altInputs):
break
newSrc.connect(self.node)
return newSrc
def shouldMutate(self, rate):
return self._geno.rand.random() < rate
def mutateString(self, curValue, validValues):
''' Returns a random string value from validValues.'''
value = curValue
while (value == curValue):
value = validValues[self._geno.rand.randint(0, len(validValues)-1)]
return value
def mutateFloat(self, curValue):
''' Returns a random float value within a gaussian distribution of
curValue.
TODO: scale the distribution size in proportion to curValue.'''
return self._geno.rand.gauss(curValue, 2)
def mutateFloatRange(self, curValue):
''' Individually mutates both the min and max of the range, making
sure that max is greater than or equal to min.'''
range = [ ]
range.append(self.mutateFloat(curValue[0]))
range.append(self.mutateFloat(curValue[1]))
while range[1] < range[0]:
range[1] = self.mutateFloat(curValue[1])
return range
def mutateFloatList(self, numValues, curValues):
list = []
for i in range(numValues):
while(True):
if i < len(curValues):
val = self.mutateFloat(curValues[i])
else:
val = self._geno.rand.uniform(list[i-1], list[i-1] * 2)
if (i == 0 or val > list[i-1]):
break
list.append(val)
return list
###############################################################################
# Output
###############################################################################
class Output(Node):
def __init__(self, genotype, node):
super(Output, self).__init__(genotype, node)
def mutateParameters(self):
self.mutateChannel()
self.mutateIntegrate()
self.mutateManual()
self.mutateDefuzz()
self.mutateRange()
self.mutateDelay()
self.mutateRate()
self.mutateOutput()
def mutateChannel(self):
''' Channel (string)'''
if self.shouldMutate(self._geno.stringMutationRate):
self.node.channel = self.mutateString(self.node.channel, self._geno.outputChannels())
def mutateIntegrate(self):
''' Integrate (string)'''
if self.shouldMutate(self._geno.stringMutationRate):
self.node.integrate = self.mutateString(self.node.integrate, Brain.Output.kIntegrateValues)
def mutateManual(self):
''' Manual (bool)'''
if self.shouldMutate(self._geno.boolMutationRate):
self.node.manual = not self.node.manual
def mutateDefuzz(self):
''' Defuzz (string)'''
if self.shouldMutate(self._geno.stringMutationRate):
self.node.defuzz = self.mutateString(self.node.defuzz, Brain.Output.kDefuzzValues)
def mutateRange(self):
''' Range (float, float)'''
if self.shouldMutate(self._geno.rangeMutationRate):
self.node.range = self.mutateFloatRange(self.node.range)
def mutateDelay(self):
''' Delay (float)'''
if self.shouldMutate(self._geno.floatMutationRate):
self.node.delay = self.mutateFloat(self.node.delay)
def mutateRate(self):
''' Rate (float)
aka: filter'''
if self.shouldMutate(self._geno.floatMutationRate):
self.node.rate = self.mutateFloat(self.node.rate)
def mutateOutput(self):
''' Output (float)
Only allowed to mutate if Output node is not connected and Manual
is set.'''
if (self.node.manual and
not self.node.inputs and
not self.node.altInputs and
self.shouldMutate(self._geno.floatMutationRate)):
self.node.output = self.mutateFloat(self.node.output)
def flipInputs(self):
''' No alt-inputs allowed, so no mutations will occur.
'''
pass
###############################################################################
# Defuzz
###############################################################################
class Defuzz(Node):
def __init__(self, genotype, node):
super(Defuzz, self).__init__(genotype, node)
def mutateParameters(self):
self.mutateDefuzz()
self.mutateElse()
def mutateDefuzz(self):
''' Defuzz (float)'''
if self.shouldMutate(self._geno.floatMutationRate):
self.node.defuzz = self.mutateFloat(self.node.defuzz)
def mutateElse(self):
''' Else (bool)'''
if self.shouldMutate(self._geno.boolMutationRate):
self.node.isElse = not self.node.isElse
def flipInputs(self):
''' No alt-inputs allowed if it is an else node, otherwise one
alt-input allowed. Possibility to flip a single alt-input into
regular input, and, if no alt-input is present, possibility to
flip a regular input into an alt-input.
'''
if not self.node.isElse:
# Occasionally flip an alt input into a regular input
# grab the current input length so we don't flip any
# inputs back and forth
numInputs = len(self.node.inputs)
if self.node.altInputs and self.shouldMutate(self._geno.flipInputRate):
self.node.inputs.append(self.node.altInputs.pop())
# If there aren't any alt inputs, occasionally flip a regular
# input into an alt input
if (not self.node.altInputs and
self.shouldMutate(self._geno.flipInputRate)):
index = self._geno.rand.randint(0, numInputs - 1)
value = self.node.inputs.pop(index)
self.node.altInputs.append(value)
###############################################################################
# Or
###############################################################################
class Or(Node):
def __init__(self, genotype, node):
super(Or, self).__init__(genotype, node)
def mutateParameters(self):
self.mutateWeight()
self.mutateType()
def mutateWeight(self):
''' Weight (float)'''
if self.shouldMutate(self._geno.floatMutationRate):
self.node.weight = self.mutateFloat(self.node.weight)
def mutateType(self):
''' Type (string)'''
if self.shouldMutate(self._geno.stringMutationRate):
self.node.type = self.mutateString(self.node.type, Brain.Or.kTypeValues)
def flipInputs(self):
''' Any number of alt-inputs allowed. Each input can be flipped into an
alt-input, and each alt-input can be flipped into a regular input.
'''
# record the current number of inputs, since the inputs list may be
# extended by flipped alt-inputs and we don't want those new inputs
# to be flipped again
numInputs = len(self.node.inputs)
# iterate through the list from end to beginning so that our iteration
# isn't confused when we remove elements from the list
for i in range(len(self.node.altInputs), 0, -1):
if self.shouldMutate(self._geno.flipInputRate):
self.node.inputs.append(self.node.altInputs.pop(i-1))
for i in range(numInputs, 0, -1):
if self.shouldMutate(self._geno.flipInputRate):
self.node.altInputs.append(self.node.inputs.pop(i-1))
###############################################################################
# Rule
###############################################################################
class Rule(Node):
def __init__(self, genotype, node):
super(Rule, self).__init__(genotype, node)
def mutateParameters(self):
self.mutateWeight()
self.mutateType()
def mutateWeight(self):
''' Weight (float)'''
if self.shouldMutate(self._geno.floatMutationRate):
self.node.weight = self.mutateFloat(self.node.weight)
def mutateType(self):
''' Type (string)'''
if self.shouldMutate(self._geno.stringMutationRate):
self.node.type = self.mutateString(self.node.type, Brain.Rule.kTypeValues)
def flipInputs(self):
''' Any number of alt-inputs allowed. Each input can be flipped into an
alt-input, and each alt-input can be flipped into a regular input.
'''
# record the current number of inputs, since the inputs list may be
# extended by flipped alt-inputs and we don't want those new inputs
# to be flipped again
numInputs = len(self.node.inputs)
# iterate through the list from end to beginning so that our iteration
# isn't confused when we remove elements from the list
for i in range(len(self.node.altInputs), 0, -1):
if self.shouldMutate(self._geno.flipInputRate):
self.node.inputs.append(self.node.altInputs.pop(i-1))
for i in range(numInputs, 0, -1):
if self.shouldMutate(self._geno.flipInputRate):
self.node.altInputs.append(self.node.inputs.pop(i-1))
###############################################################################
# Fuzz
###############################################################################
class Fuzz(Node):
def __init__(self, genotype, node):
super(Fuzz, self).__init__(genotype, node)
def mutateParameters(self):
self.mutateCurve()
self.mutateInterpolation()
self.mutateWrap()
def mutateCurve(self):
''' Inference (String) Points (float(s))'''
mutatePoints = False
if self.shouldMutate(self._geno.stringMutationRate):
self.node.inference = self.mutateString(self.node.inference, Brain.Fuzz.kInferenceValues)
mutatePoints = True
else:
mutatePoints = self.shouldMutate(self._geno.listMutationRate)
if mutatePoints:
self.node.inferencePoints = self.mutateFloatList(Brain.Fuzz.kInferenceNum[self.node.inference],
self.node.inferencePoints)
def mutateInterpolation(self):
''' Interpolation (string)'''
if self.shouldMutate(self._geno.stringMutationRate):
self.node.interpolation = self.mutateString(self.node.interpolation, Brain.Fuzz.kInterpolationValues)
def mutateWrap(self):
''' Wrap (bool)'''
if self.shouldMutate(self._geno.boolMutationRate):
self.node.wrap = not self.node.wrap
def flipInputs(self):
''' No alt-inputs allowed, so no mutations will occur.
'''
pass
###############################################################################
# Noise
###############################################################################
class Noise(Node):
def __init__(self, genotype, node):
super(Noise, self).__init__(genotype, node)
def mutateParameters(self):
self.mutateRate()
def mutateRate(self):
''' Rate (float)'''
if self.shouldMutate(self._geno.floatMutationRate):
self.node.rate = self.mutateFloat(self.node.rate)
def flipInputs(self):
''' No alt-inputs allowed, so no mutations will occur.
'''
pass
###############################################################################
# Timer
###############################################################################
class Timer(Node):
def __init__(self, genotype, node):
super(Timer, self).__init__(genotype, node)
def mutateParameters(self):
self.mutateRate()
self.mutateTrigger()
self.mutateRange()
self.mutateEndless()
def mutateRate(self):
''' Rate (float)'''
if self.shouldMutate(self._geno.floatMutationRate):
self.node.rate = self.mutateFloat(self.node.rate)
def mutateTrigger(self):
''' Trigger (string)'''
if self.shouldMutate(self._geno.stringMutationRate):
self.node.trigger = self.mutateString(self.node.trigger, Brain.Timer.kTriggerValues)
def mutateRange(self):
''' Range (float, float)'''
if self.shouldMutate(self._geno.rangeMutationRate):
self.node.range = self.mutateFloatRange(self.node.range)
def mutateEndless(self):
''' Endless (bool)'''
if self.shouldMutate(self._geno.boolMutationRate):
self.node.endless = not self.node.endless
def flipInputs(self):
''' One alt and one regular input allowed.
'''
if (self.node.inputs or self.node.altInputs):
if self.shouldMutate(self._geno.flipInputRate):
if self.node.inputs and self.node.altInputs:
temp = self.node.inputs.pop()
self.node.inputs.append(self.node.altInputs.pop())
self.node.altInputs.append(temp)
elif not self.node.altInputs:
self.node.altInputs.append(self.node.inputs.pop())
else:
self.node.inputs.append(self.node.altInputs.pop())
###############################################################################
# Input
###############################################################################
class Input(Node):
def __init__(self, genotype, node):
super(Input, self).__init__(genotype, node)
def mutateParameters(self):
self.mutateValue()
self.mutateIntegrate()
self.mutateRange()
def mutateValue(self):
''' Mutate either the Channel (string) or the Output (float)
If the channel is set and it should mutate, switch to
output for some percentage of the time. And vice versa.'''
if self.node.channel:
if self.shouldMutate(self._geno.stringMutationRate):
if self.shouldMutate(self._geno.switchInputRate):
self.node.channel = ""
self.node.output = self.mutateFloat(self.node.output)
else:
self.node.channel = self.mutateString(self.node.channel, self._geno.inputChannels())
elif self.shouldMutate(self._geno.floatMutationRate):
if self.shouldMutate(self._geno.switchInputRate):
self.node.channel = self.mutateString(self.node.channel, self._geno.inputChannels())
else:
self.node.output = self.mutateFloat(self.node.output)
def mutateIntegrate(self):
''' Integrate (string)'''
if self.shouldMutate(self._geno.stringMutationRate):
self.node.integrate = self.mutateString(self.node.integrate, Brain.Input.kIntegrateValues)
def mutateRange(self):
''' Range (float, float)'''
if self.shouldMutate(self._geno.rangeMutationRate):
self.node.range = self.mutateFloatRange(self.node.range)
def flipInputs(self):
''' No alt-inputs allowed, so no mutations will occur.
'''
pass
|
redpawfx/massiveImporter
|
python/ns/evolve/Mutate.py
|
Python
|
mit
| 15,629
|
[
"Gaussian"
] |
42692675d67243dd4aa25dc6de4f77c802d083222288d4f11403ca9685a7a0ba
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Analytic Fourier transformation for AO and AO-pair value
'''
import ctypes
import numpy
import scipy.linalg
from pyscf import lib
from pyscf import gto
from pyscf.gto.moleintor import libcgto
# TODO: in C code, store complex data in two vectors for real and imag part
#
# \int mu*nu*exp(-ik*r) dr
#
# gxyz is the index for Gvbase
def ft_aopair(mol, Gv, shls_slice=None, aosym='s1', b=numpy.eye(3),
gxyz=None, Gvbase=None, buf=None, intor='GTO_ft_ovlp',
comp=1, verbose=None):
r''' FT transform AO pair
\int i(r) j(r) exp(-ikr) dr^3
'''
intor = mol._add_suffix(intor)
if shls_slice is None:
shls_slice = (0, mol.nbas, 0, mol.nbas)
nGv = Gv.shape[0]
if (gxyz is None or b is None or Gvbase is None
# backward compatibility for pyscf-1.2, in which the argument Gvbase is gs
or (Gvbase is not None and isinstance(Gvbase[0], (int, numpy.integer)))):
GvT = numpy.asarray(Gv.T, order='C')
p_gxyzT = lib.c_null_ptr()
p_gs = (ctypes.c_int*3)(0,0,0)
p_b = (ctypes.c_double*1)(0)
eval_gz = 'GTO_Gv_general'
else:
if abs(b-numpy.diag(b.diagonal())).sum() < 1e-8:
eval_gz = 'GTO_Gv_orth'
else:
eval_gz = 'GTO_Gv_nonorth'
GvT = numpy.asarray(Gv.T, order='C')
gxyzT = numpy.asarray(gxyz.T, order='C', dtype=numpy.int32)
p_gxyzT = gxyzT.ctypes.data_as(ctypes.c_void_p)
b = numpy.hstack((b.ravel(), numpy.zeros(3)) + Gvbase)
p_b = b.ctypes.data_as(ctypes.c_void_p)
p_gs = (ctypes.c_int*3)(*[len(x) for x in Gvbase])
ao_loc = gto.moleintor.make_loc(mol._bas, intor)
ni = ao_loc[shls_slice[1]] - ao_loc[shls_slice[0]]
nj = ao_loc[shls_slice[3]] - ao_loc[shls_slice[2]]
if aosym == 's1':
if (shls_slice[:2] == shls_slice[2:4] and
intor.startswith('GTO_ft_ovlp')):
fill = getattr(libcgto, 'GTO_ft_fill_s1hermi')
else:
fill = getattr(libcgto, 'GTO_ft_fill_s1')
shape = (nGv,ni,nj,comp)
else:
fill = getattr(libcgto, 'GTO_ft_fill_s2')
i0 = ao_loc[shls_slice[0]]
i1 = ao_loc[shls_slice[1]]
nij = i1*(i1+1)//2 - i0*(i0+1)//2
shape = (nGv,nij,comp)
mat = numpy.ndarray(shape, order='F', dtype=numpy.complex128, buffer=buf)
fn = libcgto.GTO_ft_fill_drv
intor = getattr(libcgto, intor)
eval_gz = getattr(libcgto, eval_gz)
fn(intor, eval_gz, fill, mat.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(comp), (ctypes.c_int*4)(*shls_slice),
ao_loc.ctypes.data_as(ctypes.c_void_p), ctypes.c_double(0),
GvT.ctypes.data_as(ctypes.c_void_p),
p_b, p_gxyzT, p_gs, ctypes.c_int(nGv),
mol._atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.natm),
mol._bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.nbas),
mol._env.ctypes.data_as(ctypes.c_void_p))
mat = numpy.rollaxis(mat, -1, 0)
if comp == 1:
mat = mat[0]
return mat
# gxyz is the index for Gvbase
def ft_ao(mol, Gv, shls_slice=None, b=numpy.eye(3),
gxyz=None, Gvbase=None, verbose=None):
''' FT transform AO
'''
if shls_slice is None:
shls_slice = (0, mol.nbas)
nGv = Gv.shape[0]
if (gxyz is None or b is None or Gvbase is None
# backward compatibility for pyscf-1.2, in which the argument Gvbase is gs
or (Gvbase is not None and isinstance(Gvbase[0], (int, numpy.integer)))):
GvT = numpy.asarray(Gv.T, order='C')
p_gxyzT = lib.c_null_ptr()
p_gs = (ctypes.c_int*3)(0,0,0)
p_b = (ctypes.c_double*1)(0)
eval_gz = 'GTO_Gv_general'
else:
if abs(b-numpy.diag(b.diagonal())).sum() < 1e-8:
eval_gz = 'GTO_Gv_orth'
else:
eval_gz = 'GTO_Gv_nonorth'
GvT = numpy.asarray(Gv.T, order='C')
gxyzT = numpy.asarray(gxyz.T, order='C', dtype=numpy.int32)
p_gxyzT = gxyzT.ctypes.data_as(ctypes.c_void_p)
b = numpy.hstack((b.ravel(), numpy.zeros(3)) + Gvbase)
p_b = b.ctypes.data_as(ctypes.c_void_p)
p_gs = (ctypes.c_int*3)(*[len(x) for x in Gvbase])
fn = libcgto.GTO_ft_fill_drv
if mol.cart:
intor = getattr(libcgto, 'GTO_ft_ovlp_cart')
else:
intor = getattr(libcgto, 'GTO_ft_ovlp_sph')
eval_gz = getattr(libcgto, eval_gz)
fill = getattr(libcgto, 'GTO_ft_fill_s1')
ghost_atm = numpy.array([[0,0,0,0,0,0]], dtype=numpy.int32)
ghost_bas = numpy.array([[0,0,1,1,0,0,3,0]], dtype=numpy.int32)
ghost_env = numpy.zeros(4)
ghost_env[3] = numpy.sqrt(4*numpy.pi) # s function spherical norm
atm, bas, env = gto.conc_env(mol._atm, mol._bas, mol._env,
ghost_atm, ghost_bas, ghost_env)
ao_loc = mol.ao_loc_nr()
nao = ao_loc[mol.nbas]
ao_loc = numpy.asarray(numpy.hstack((ao_loc, [nao+1])), dtype=numpy.int32)
ni = ao_loc[shls_slice[1]] - ao_loc[shls_slice[0]]
mat = numpy.zeros((nGv,ni), order='F', dtype=numpy.complex)
shls_slice = shls_slice + (mol.nbas, mol.nbas+1)
fn(intor, eval_gz, fill, mat.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(1), (ctypes.c_int*4)(*shls_slice),
ao_loc.ctypes.data_as(ctypes.c_void_p),
ctypes.c_double(0),
GvT.ctypes.data_as(ctypes.c_void_p),
p_b, p_gxyzT, p_gs, ctypes.c_int(nGv),
atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(len(atm)),
bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(len(bas)),
env.ctypes.data_as(ctypes.c_void_p))
return mat
if __name__ == '__main__':
from pyscf import gto
mol = gto.Mole()
mol.atom = '''C 1.3 .2 .3
C .1 .1 1.1
'''
mol.basis = 'ccpvdz'
#mol.basis = {'C': [[0, (2.4, .1, .6), (1.0,.8, .4)], [1, (1.1, 1)]]}
#mol.basis = {'C': [[0, (2.4, 1)]]}
mol.unit = 'B'
mol.build(0,0)
L = 5.
n = 20
a = numpy.diag([L,L,L])
b = scipy.linalg.inv(a)
gs = [n,n,n]
gxrange = list(range(gs[0]+1)) + list(range(-gs[0],0))
gyrange = list(range(gs[1]+1)) + list(range(-gs[1],0))
gzrange = list(range(gs[2]+1)) + list(range(-gs[2],0))
gxyz = lib.cartesian_prod((gxrange, gyrange, gzrange))
Gv = 2*numpy.pi * numpy.dot(gxyz, b)
import time
print(time.clock())
print(numpy.linalg.norm(ft_aopair(mol, Gv, None, 's1', b, gxyz, gs)) - 63.0239113778)
print(time.clock())
print(numpy.linalg.norm(ft_ao(mol, Gv, None, b, gxyz, gs))-56.8273147065)
print(time.clock())
|
gkc1000/pyscf
|
pyscf/gto/ft_ao.py
|
Python
|
apache-2.0
| 7,194
|
[
"PySCF"
] |
e10789114becaa5311c01e49ae3019a16183b78a37e53ef0271cb2ead20981f4
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides utility classes for string operations.
"""
import re
from fractions import Fraction
def str_delimited(results, header=None, delimiter="\t"):
"""
Given a tuple of tuples, generate a delimited string form.
>>> results = [["a","b","c"],["d","e","f"],[1,2,3]]
>>> print(str_delimited(results,delimiter=","))
a,b,c
d,e,f
1,2,3
Args:
results: 2d sequence of arbitrary types.
header: optional header
Returns:
Aligned string output in a table-like format.
"""
returnstr = ""
if header is not None:
returnstr += delimiter.join(header) + "\n"
return returnstr + "\n".join([delimiter.join([str(m) for m in result])
for result in results])
def formula_double_format(afloat, ignore_ones=True, tol=1e-8):
"""
This function is used to make pretty formulas by formatting the amounts.
Instead of Li1.0 Fe1.0 P1.0 O4.0, you get LiFePO4.
Args:
afloat (float): a float
ignore_ones (bool): if true, floats of 1 are ignored.
tol (float): Tolerance to round to nearest int. i.e. 2.0000000001 -> 2
Returns:
A string representation of the float for formulas.
"""
if ignore_ones and afloat == 1:
return ""
elif abs(afloat - int(afloat)) < tol:
return str(int(afloat))
else:
return str(round(afloat, 8))
def latexify(formula):
"""
Generates a LaTeX formatted formula. E.g., Fe2O3 is transformed to
Fe$_{2}$O$_{3}$.
Args:
formula (str): Input formula.
Returns:
Formula suitable for display as in LaTeX with proper subscripts.
"""
return re.sub(r"([A-Za-z\(\)])([\d\.]+)", r"\1$_{\2}$", formula)
def htmlify(formula):
"""
Generates an HTML formatted formula, e.g. Fe2O3 is transformed to
Fe<sub>2</sub>O<sub>3</sub>.
:param formula:
:return:
"""
return re.sub(r"([A-Za-z\(\)])([\d\.]+)", r"\1<sub>\2</sub>", formula)
def unicodeify(formula):
"""
Generates a formula with unicode subscripts, e.g. Fe2O3 is transformed
to Fe₂O₃. Does not support formulae with decimal points.
:param formula:
:return:
"""
if '.' in formula:
raise ValueError('No unicode character exists for subscript period.')
subscript_unicode_map = {0: '₀', 1: '₁', 2: '₂', 3: '₃', 4: '₄',
5: '₅', 6: '₆', 7: '₇', 8: '₈', 9: '₉'}
for original_subscript, subscript_unicode in subscript_unicode_map.items():
formula = formula.replace(str(original_subscript), subscript_unicode)
return formula
def latexify_spacegroup(spacegroup_symbol):
r"""
Generates a latex formatted spacegroup. E.g., P2_1/c is converted to
P2$_{1}$/c and P-1 is converted to P$\\overline{1}$.
Args:
spacegroup_symbol (str): A spacegroup symbol
Returns:
A latex formatted spacegroup with proper subscripts and overlines.
"""
sym = re.sub(r"_(\d+)", r"$_{\1}$", spacegroup_symbol)
return re.sub(r"-(\d)", r"$\\overline{\1}$", sym)
def unicodeify_spacegroup(spacegroup_symbol):
r"""
Generates a unicode formatted spacegroup. E.g., P2$_{1}$/c is converted to
P2₁/c and P$\\overline{1}$ is converted to P̅1.
Args:
spacegroup_symbol (str): A spacegroup symbol as LaTeX
Returns:
A unicode spacegroup with proper subscripts and overlines.
"""
if not spacegroup_symbol:
return ""
subscript_unicode_map = {
0: "₀",
1: "₁",
2: "₂",
3: "₃",
4: "₄",
5: "₅",
6: "₆",
7: "₇",
8: "₈",
9: "₉",
}
symbol = latexify_spacegroup(spacegroup_symbol)
for number, unicode_number in subscript_unicode_map.items():
symbol = symbol.replace("$_{" + str(number) + "}$", unicode_number)
symbol = symbol.replace("_" + str(number), unicode_number)
overline = "\u0305" # u"\u0304" (macron) is also an option
symbol = symbol.replace("$\\overline{", overline)
symbol = symbol.replace("$", "")
symbol = symbol.replace("{", "")
symbol = symbol.replace("}", "")
return symbol
def unicodeify_species(specie_string):
r"""
Generates a unicode formatted species string, with appropriate
superscripts for oxidation states.
Args:
specie_string (str): Species string, e.g. O2-
Returns:
Species string, e.g. O²⁻
"""
if not specie_string:
return ""
superscript_unicode_map = {
"0": "⁰",
"1": "¹",
"2": "²",
"3": "³",
"4": "⁴",
"5": "⁵",
"6": "⁶",
"7": "⁷",
"8": "⁸",
"9": "⁹",
"+": "⁺",
"-": "⁻",
}
for character, unicode_character in superscript_unicode_map.items():
specie_string = specie_string.replace(character, unicode_character)
return specie_string
def stream_has_colours(stream):
"""
True if stream supports colours. Python cookbook, #475186
"""
if not hasattr(stream, "isatty"):
return False
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
curses.setupterm()
return curses.tigetnum("colors") > 2
except Exception:
return False # guess false in case of error
def transformation_to_string(matrix, translation_vec=(0, 0, 0), components=('x', 'y', 'z'), c='', delim=','):
"""
Convenience method. Given matrix returns string, e.g. x+2y+1/4
:param matrix: 3x3 matrix describing the transformation
:param translation_vec: translation vector, defaults to (0, 0, 0)
:param components: either ('x', 'y', 'z') or ('a', 'b', 'c')
:param c: optional additional character to print (used for magmoms)
:param delim: delimiter
:return: xyz string
"""
parts = []
for i in range(3):
s = ''
m = matrix[i]
t = translation_vec[i]
for j, dim in enumerate(components):
if m[j] != 0:
f = Fraction(m[j]).limit_denominator()
if s != '' and f >= 0:
s += '+'
if abs(f.numerator) != 1:
s += str(f.numerator)
elif f < 0:
s += '-'
s += c + dim
if f.denominator != 1:
s += '/' + str(f.denominator)
if t != 0:
s += ('+' if (t > 0 and s != '') else '') + str(Fraction(t).limit_denominator())
if s == '':
s += '0'
parts.append(s)
return delim.join(parts)
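# A short usage sketch (illustrative values): an identity rotation combined with a 1/2
# translation along x renders as the xyz string below.
#
#   transformation_to_string([[1, 0, 0], [0, 1, 0], [0, 0, 1]], translation_vec=(0.5, 0, 0))
#   # -> 'x+1/2,y,z'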
def disordered_formula(disordered_struct, symbols=('x', 'y', 'z'), fmt='plain'):
"""
Returns a formula of a form like AxB1-x (x=0.5)
for disordered structures. Will only return a
formula for disordered structures with one
kind of disordered site at present.
Args:
disordered_struct: a disordered structure
symbols: a tuple of characters to use for
subscripts, by default this is ('x', 'y', 'z')
but if you have more than three disordered
species more symbols will need to be added
fmt (str): 'plain', 'HTML' or 'LaTeX'
Returns (str): a disordered formula string
"""
# this is in string utils and not in
# Composition because we need to have access
# to site occupancies to calculate this, so
# have to pass the full structure as an argument
# (alternatively this could be made a method on
# Structure)
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import get_el_sp
if disordered_struct.is_ordered:
raise ValueError("Structure is not disordered, "
"so disordered formula not defined.")
disordered_site_compositions = {site.species
for site in disordered_struct if not site.is_ordered}
if len(disordered_site_compositions) > 1:
# this probably won't happen too often
raise ValueError("Ambiguous how to define disordered "
"formula when more than one type of disordered "
"site is present.")
disordered_site_composition = disordered_site_compositions.pop()
disordered_species = {str(sp) for sp, occu in disordered_site_composition.items()}
if len(disordered_species) > len(symbols):
# this probably won't happen too often either
raise ValueError("Not enough symbols to describe disordered composition: "
"{}".format(symbols))
symbols = list(symbols)[0:len(disordered_species) - 1]
comp = disordered_struct.composition.get_el_amt_dict().items()
# sort by electronegativity, as per composition
comp = sorted(comp, key=lambda x: get_el_sp(x[0]).X)
disordered_comp = []
variable_map = {}
total_disordered_occu = sum([occu for sp, occu in comp
if str(sp) in disordered_species])
# composition to get common factor
factor_comp = disordered_struct.composition.as_dict()
factor_comp['X'] = total_disordered_occu
for sp in disordered_species:
del factor_comp[str(sp)]
factor_comp = Composition.from_dict(factor_comp)
factor = factor_comp.get_reduced_formula_and_factor()[1]
total_disordered_occu /= factor
remainder = "{}-{}".format(formula_double_format(total_disordered_occu, ignore_ones=False),
'-'.join(symbols))
for sp, occu in comp:
sp = str(sp)
if sp not in disordered_species:
disordered_comp.append((sp, formula_double_format(occu / factor)))
else:
if len(symbols) > 0:
symbol = symbols.pop(0)
disordered_comp.append((sp, symbol))
variable_map[symbol] = occu / total_disordered_occu / factor
else:
disordered_comp.append((sp, remainder))
if fmt == 'LaTeX':
sub_start = "_{"
sub_end = "}"
elif fmt == 'HTML':
sub_start = "<sub>"
sub_end = "</sub>"
elif fmt != 'plain':
raise ValueError("Unsupported output format, "
"choose from: LaTeX, HTML, plain")
disordered_formula = []
for sp, occu in disordered_comp:
disordered_formula.append(sp)
if occu: # can be empty string if 1
if fmt != 'plain':
disordered_formula.append(sub_start)
disordered_formula.append(occu)
if fmt != 'plain':
disordered_formula.append(sub_end)
disordered_formula.append(" ")
disordered_formula += ["{}={} ".format(k, formula_double_format(v))
for k, v in variable_map.items()]
comp = disordered_struct.composition
return "".join(map(str, disordered_formula))[0:-1]
class StringColorizer:
"""
Provides coloring for strings in terminals.
"""
colours = {
"default": "",
"blue": "\x1b[01;34m",
"cyan": "\x1b[01;36m",
"green": "\x1b[01;32m",
"red": "\x1b[01;31m",
}
def __init__(self, stream):
"""
:param stream: Input stream
"""
self.has_colours = stream_has_colours(stream)
def __call__(self, string, colour):
"""
:param string: Actual string
:param colour: Color to assign.
:return: Colored string.
"""
if self.has_colours:
code = self.colours.get(colour.lower(), "")
if code:
return code + string + "\x1b[00m"
else:
return string
else:
return string
if __name__ == "__main__":
import doctest
doctest.testmod()
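# A short usage sketch for StringColorizer, assuming an ANSI-capable terminal; on streams
# that are not TTYs the call simply returns the plain string:
#
#   import sys
#   colorize = StringColorizer(sys.stdout)
#   print(colorize("converged", "green"))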
|
fraricci/pymatgen
|
pymatgen/util/string.py
|
Python
|
mit
| 11,931
|
[
"pymatgen"
] |
a39e56a4a14787fc819cdbb1baf2679606f5b15674fc1e7bd2ae29a251fa7025
|
"""
Unit tests for HDF5 parsing; runs tests in multiple libraries
"""
__author__ = 'Dallas R. Trinkle'
import unittest
import numpy as np
import h5py
import onsager.crystal as crystal
import onsager.PowerExpansion as PE
import onsager.GFcalc as GFcalc
import onsager.crystalStars as stars
import onsager.OnsagerCalc as OnsagerCalc
T3D = PE.Taylor3D
class HDF5ParsingTests(unittest.TestCase):
def setUp(self):
self.f = h5py.File('/dev/null', 'w', driver='core', backing_store=False)
def tearDown(self):
self.f.close()
def testPowerExpansion(self):
"""Test whether we can write and read an HDF5 group containing a PowerExpansion"""
basis = [(np.eye(2), np.array([0.5,-np.sqrt(0.75),0.])),
(np.eye(2), np.array([0.5,np.sqrt(0.75),0.])),
(np.eye(2), np.array([-1.,0.,0.])),
(np.eye(2), np.array([-0.5,-np.sqrt(0.75),0.])),
(np.eye(2), np.array([-0.5,np.sqrt(0.75),0.])),
(np.eye(2), np.array([1.,0.,0.])),
(np.eye(2)*2, np.array([0.,0.,1.])),
(np.eye(2)*2, np.array([0.,0.,-1.])),
]
T3D()
c1 = T3D([c[0] for c in T3D.constructexpansion(basis, N=4, pre=(0,1,1/2,1/6,1/24))])
c2 = T3D([c[0] for c in T3D.constructexpansion(basis, N=4, pre=(0,-1j,-1/2,+1j/6,1/24))])
c1.addhdf5(self.f.create_group('T3D-c1'))
c2.addhdf5(self.f.create_group('T3D-c2'))
c1copy = T3D.loadhdf5(self.f['T3D-c1'])
c2copy = T3D.loadhdf5(self.f['T3D-c2'])
for (a, b) in [(c1, c1copy), (c2, c2copy)]:
self.assertEqual(len(a.coefflist), len(b.coefflist))
for (n0, l0, coeff0), (n1, l1, coeff1) in zip(a.coefflist, b.coefflist):
self.assertEqual(n0, n1)
self.assertEqual(l0, l1)
self.assertTrue(np.all(coeff0 == coeff1))
c1.dumpinternalsHDF5(self.f.create_group('Taylor3Dinternals'))
self.assertTrue(T3D.checkinternalsHDF5(self.f['Taylor3Dinternals']))
def testGreenFunction(self):
"""Test whether we can write and read an HDF5 group containing a GFcalc"""
HCP = crystal.Crystal.HCP(1., np.sqrt(8/3))
HCP_sitelist = HCP.sitelist(0)
HCP_jumpnetwork = HCP.jumpnetwork(0, 1.01)
HCP_GF = GFcalc.GFCrystalcalc(HCP, 0, HCP_sitelist, HCP_jumpnetwork, Nmax=4)
HCP_GF.addhdf5(self.f.create_group('GFcalc'))
GFcopy = GFcalc.GFCrystalcalc.loadhdf5(HCP, self.f['GFcalc']) # note: we need to pass crystal!
HCP_GF.SetRates([2.],[0],[1.5,0.5],[0.5,1.]) # one unique site, two types of jumps
GFcopy.SetRates([2.],[0],[1.5,0.5],[0.5,1.]) # one unique site, two types of jumps
self.assertEqual(HCP_GF(0,0,np.zeros(3)), GFcopy(0,0,np.zeros(3)))
def testPairState(self):
"""Test whether conversion of different PairState groups back and forth to arrays works"""
PSlist = [ stars.PairState(i=0, j=1, R=np.array([1,0,-1]), dx=np.array([1.,0.,-1.])),
stars.PairState(i=1, j=0, R=np.array([-1,0,1]), dx=np.array([-1.,0.,1.]))]
ij, R, dx = stars.PSlist2array(PSlist)
self.assertEqual(ij.shape, (2,2))
self.assertEqual(R.shape, (2,3))
self.assertEqual(dx.shape, (2,3))
PSlistcopy = stars.array2PSlist(ij, R, dx)
for PS0, PS1 in zip(PSlist, PSlistcopy):
self.assertEqual(PS0, PS1)
def testFlattening(self):
"""Test whether conversion between lists of lists and flat lists works"""
l1 = [ [n for n in range(10)] ]
fl1, ind1 = stars.doublelist2flatlistindex(l1)
self.assertTrue(np.all(ind1 == 0))
self.assertEqual(fl1,l1[0])
l1copy = stars.flatlistindex2doublelist(fl1, ind1)
self.assertEqual(len(l1), len(l1copy))
for lis1, lis1copy in zip(l1, l1copy):
self.assertEqual(lis1, lis1copy)
l2 = [ [n for n in range(5)], [n for n in range(1)], [n for n in range(10)]]
l2copy = stars.flatlistindex2doublelist(*stars.doublelist2flatlistindex(l2))
self.assertEqual(len(l2), len(l2copy))
for lis1, lis1copy in zip(l2, l2copy):
self.assertEqual(lis1, lis1copy)
def testStarSet(self):
"""Test whether we can write and read an HDF5 group containing a StarSet"""
HCP = crystal.Crystal.HCP(1., np.sqrt(8/3))
HCP_jumpnetwork = HCP.jumpnetwork(0, 1.01)
HCP_StarSet = stars.StarSet(HCP_jumpnetwork, HCP, 0, Nshells=2)
HCP_StarSet.addhdf5(self.f.create_group('thermo'))
HCP_StarSetcopy = stars.StarSet.loadhdf5(HCP, self.f['thermo']) # note: we need to pass crystal!
self.assertEqual(HCP_StarSet.Nstates, HCP_StarSetcopy.Nstates)
self.assertEqual(HCP_StarSet.Nshells, HCP_StarSetcopy.Nshells)
for s1, s2 in zip(HCP_StarSet.states, HCP_StarSetcopy.states):
self.assertEqual(s1, s2)
self.assertEqual(HCP_StarSet.stateindex(s1), HCP_StarSet.stateindex(s2))
self.assertEqual(HCP_StarSet.starindex(s1), HCP_StarSet.starindex(s2))
def testVectorStarSet(self):
"""Test whether we can write and read an HDF5 group containing a VectorStarSet"""
HCP = crystal.Crystal.HCP(1., np.sqrt(8/3))
HCP_jumpnetwork = HCP.jumpnetwork(0, 1.01)
HCP_StarSet = stars.StarSet(HCP_jumpnetwork, HCP, 0, Nshells=2)
HCP_VectorStarSet = stars.VectorStarSet(HCP_StarSet)
HCP_VectorStarSet.addhdf5(self.f.create_group('vkinetic'))
HCP_VectorStarSetcopy = stars.VectorStarSet.loadhdf5(HCP_StarSet,
self.f['vkinetic']) # note: we need to pass StarSet!
self.assertEqual(HCP_VectorStarSet.Nvstars, HCP_VectorStarSetcopy.Nvstars)
self.assertTrue(np.all(HCP_VectorStarSet.outer == HCP_VectorStarSetcopy.outer))
for p1list, v1list, p2list, v2list in zip(HCP_VectorStarSet.vecpos, HCP_VectorStarSet.vecvec,
HCP_VectorStarSetcopy.vecpos, HCP_VectorStarSetcopy.vecvec):
self.assertEqual(p1list, p2list)
self.assertTrue(all(np.all(v1 == v2) for v1, v2 in zip(v1list, v2list)))
def testvTKdict(self):
"""Test whether we can write and read an HDF5 group containing a dictionary indexed by vTK"""
self.assertEqual(OnsagerCalc.arrays2vTKdict(*OnsagerCalc.vTKdict2arrays({})), {})
dict1 = {}
vTK = OnsagerCalc.vacancyThermoKinetics(pre=np.ones(2), betaene=np.zeros(2),
preT=np.ones(4), betaeneT=np.zeros(4))
dict1[vTK] = np.eye(3)
vTK = OnsagerCalc.vacancyThermoKinetics(pre=2.*np.ones(2), betaene=np.zeros(2),
preT=np.ones(4), betaeneT=np.ones(4))
dict1[vTK] = 2.*np.eye(3)
dict1copy = OnsagerCalc.arrays2vTKdict(*OnsagerCalc.vTKdict2arrays(dict1))
for k,v in zip(dict1.keys(), dict1.values()):
self.assertTrue(np.all(dict1copy[k] == v))
for k,v in zip(dict1copy.keys(), dict1copy.values()):
self.assertTrue(np.all(dict1[k] == v))
def testOnsagerVacancyMediated(self):
"""Test whether we can write and read an HDF5 group containing a VacancyMediated Onsager Calculator"""
HCP = crystal.Crystal.HCP(1., np.sqrt(8/3))
HCP_sitelist = HCP.sitelist(0)
HCP_jumpnetwork = HCP.jumpnetwork(0, 1.01)
HCP_diffuser = OnsagerCalc.VacancyMediated(HCP, 0, HCP_sitelist, HCP_jumpnetwork, 1)
HCP_diffuser.addhdf5(self.f) # we'll usually dump it in main
HCP_diffuser_copy = OnsagerCalc.VacancyMediated.loadhdf5(self.f) # should be fully self-contained
thermaldef = {'preV': np.array([1.]), 'eneV': np.array([0.]),
'preT0': np.array([1.,1.5]), 'eneT0': np.array([0.25,0.35])}
thermaldef.update(HCP_diffuser.maketracerpreene(**thermaldef))
for L0, Lcopy in zip(HCP_diffuser.Lij(*HCP_diffuser.preene2betafree(1.0, **thermaldef)),
HCP_diffuser_copy.Lij(*HCP_diffuser_copy.preene2betafree(1.0, **thermaldef))):
self.assertTrue(np.allclose(L0, Lcopy), msg='{}\n!=\n{}'.format(L0, Lcopy))
# compare tags
for k in HCP_diffuser.tags.keys():
self.assertEqual(HCP_diffuser.tags[k], HCP_diffuser_copy.tags[k])
# do a dictionary check (dictionaries are only added *after* a minimum of one call)
HCP_diffuser.addhdf5(self.f.create_group('new'))
HCP_diffuser_copy = OnsagerCalc.VacancyMediated.loadhdf5(self.f['new']) # should be fully self-contained
for L0, Lcopy in zip(HCP_diffuser.Lij(*HCP_diffuser.preene2betafree(1.0, **thermaldef)),
HCP_diffuser_copy.Lij(*HCP_diffuser_copy.preene2betafree(1.0, **thermaldef))):
self.assertTrue(np.allclose(L0, Lcopy), msg='{}\n!=\n{}'.format(L0, Lcopy))
# Test with B2 (there are additional terms that get used when we have origin states)
B2 = crystal.Crystal(np.eye(3), [np.zeros(3), np.array([0.45, 0.45, 0.45])])
B2diffuser = OnsagerCalc.VacancyMediated(B2, 0, B2.sitelist(0), B2.jumpnetwork(0, 0.99), 1)
B2diffuser.addhdf5(self.f.create_group('B2'))
B2diffuser_copy = OnsagerCalc.VacancyMediated.loadhdf5(self.f['B2'])
Nsites, Njumps = len(B2diffuser.sitelist), len(B2diffuser.om0_jn)
tdef = {'preV': np.ones(Nsites), 'eneV': np.zeros(Njumps),
'preT0': np.ones(Njumps), 'eneT0': np.zeros(Njumps)}
tdef.update(B2diffuser.maketracerpreene(**tdef))
for L0, Lcopy in zip(B2diffuser.Lij(*B2diffuser.preene2betafree(1.0, **tdef)),
B2diffuser_copy.Lij(*B2diffuser_copy.preene2betafree(1.0, **tdef))):
self.assertTrue(np.allclose(L0, Lcopy), msg='{}\n!=\n{}'.format(L0, Lcopy))
# Test with displaced triangle (2D "B2" example):
tria2 = crystal.Crystal(np.array([[1.,0.], [0.,np.sqrt(3.)]]),
[np.zeros(2),np.array([0.5, 0.4])])
tria2diffuser = OnsagerCalc.VacancyMediated(tria2, 0, tria2.sitelist(0),
tria2.jumpnetwork(0, 1.2), 1)
tria2diffuser.addhdf5(self.f.create_group('tria'))
tria2diffuser_copy = OnsagerCalc.VacancyMediated.loadhdf5(self.f['tria'])
Nsites, Njumps = len(tria2diffuser.sitelist), len(tria2diffuser.om0_jn)
tdef2 = {'preV': np.ones(Nsites), 'eneV': np.zeros(Njumps),
'preT0': np.ones(Njumps), 'eneT0': np.zeros(Njumps)}
tdef2.update(tria2diffuser.maketracerpreene(**tdef2))
for L0, Lcopy in zip(tria2diffuser.Lij(*tria2diffuser.preene2betafree(1.0, **tdef2)),
tria2diffuser_copy.Lij(*tria2diffuser_copy.preene2betafree(1.0, **tdef2))):
self.assertTrue(np.allclose(L0, Lcopy), msg='{}\n!=\n{}'.format(L0, Lcopy))
# compare tags
for k in tria2diffuser.tags.keys():
self.assertEqual(tria2diffuser.tags[k], tria2diffuser_copy.tags[k])
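# A minimal sketch of the save/load round trip these tests exercise, assuming an on-disk
# file rather than the in-memory '/dev/null' core driver used in setUp:
#
#   with h5py.File('diffuser.h5', 'w') as f:
#       HCP_diffuser.addhdf5(f)
#   with h5py.File('diffuser.h5', 'r') as f:
#       diffuser = OnsagerCalc.VacancyMediated.loadhdf5(f)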
|
DallasTrinkle/Onsager
|
test/test_HDF5.py
|
Python
|
mit
| 11,073
|
[
"CRYSTAL",
"VTK"
] |
3546b466d8480f7ad2fedb4368651d09e3fc16442f73ce972370b5ddcdac45bb
|
# -*- coding: utf-8 -*-
"""A plugin to generate a list of domains visited."""
from urllib import parse as urlparse
from plaso.analysis import interface
from plaso.analysis import manager
class UniqueDomainsVisitedPlugin(interface.AnalysisPlugin):
"""A plugin to generate a list all domains visited.
This plugin will extract domains from browser history events extracted by
Plaso. The list produced can be used to quickly determine if there has been
a visit to a site of interest, for example, a known phishing site.
"""
NAME = 'unique_domains_visited'
_SUPPORTED_EVENT_DATA_TYPES = frozenset([
'chrome:history:file_downloaded',
'chrome:history:page_visited',
'firefox:downloads:download',
'firefox:places:page_visited',
'macosx:lsquarantine',
'msiecf:redirected',
'msiecf:url',
'msie:webcache:container',
'opera:history',
'safari:history:visit'])
# pylint: disable=unused-argument
def ExamineEvent(self, mediator, event, event_data, event_data_stream):
"""Analyzes an event and extracts domains from it.
We only evaluate straightforward web history events, not visits which can
be inferred by TypedURLs, cookies or other means.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
event (EventObject): event to examine.
event_data (EventData): event data.
event_data_stream (EventDataStream): event data stream.
"""
if event_data.data_type not in self._SUPPORTED_EVENT_DATA_TYPES:
return
url = getattr(event_data, 'url', None)
if url is None:
return
parsed_url = urlparse.urlparse(url)
domain = getattr(parsed_url, 'netloc', None)
if not domain:
return
self._analysis_counter[domain] += 1
manager.AnalysisPluginManager.RegisterPlugin(UniqueDomainsVisitedPlugin)
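# A minimal sketch of the domain extraction performed in ExamineEvent, applied to a plain
# URL string outside the Plaso pipeline (illustrative URL):
#
#   from urllib import parse as urlparse
#   parsed = urlparse.urlparse('https://www.example.com/login?next=/home')
#   parsed.netloc  # -> 'www.example.com', the key counted in _analysis_counter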
|
kiddinn/plaso
|
plaso/analysis/unique_domains_visited.py
|
Python
|
apache-2.0
| 1,934
|
[
"VisIt"
] |
8877eb7e3738139e2d7eb943c21e7b010bb49b5eed2866a871474cc1d8686745
|
# -*- coding: utf-8 -*-
import fauxfactory
import pytest
import cfme.configure.access_control as ac
from cfme import test_requirements
from cfme.configure.access_control import Tenant
from cfme.fixtures import pytest_selenium as sel
from cfme.automate import explorer as automate
from cfme.provisioning import provisioning_form
from cfme.services import requests
from cfme.web_ui import fill, flash
from utils import testgen, version
from utils.wait import wait_for
pytestmark = [
test_requirements.quota,
pytest.mark.meta(server_roles="+automate"),
pytest.mark.usefixtures('uses_infra_providers')
]
def pytest_generate_tests(metafunc):
argnames, argvalues, idlist = testgen.provider_by_type(
metafunc, ['virtualcenter'])
testgen.parametrize(metafunc, argnames, argvalues, ids=idlist, scope='module')
@pytest.fixture(scope="function")
def vm_name():
vm_name = 'test_quota_prov_{}'.format(fauxfactory.gen_alphanumeric())
return vm_name
@pytest.fixture(scope="module")
def domain(request):
domain = automate.Domain(name=fauxfactory.gen_alphanumeric(), enabled=True)
domain.create()
request.addfinalizer(lambda: domain.delete() if domain.exists() else None)
return domain
@pytest.fixture(scope="module")
def cls(request, domain):
tcls = automate.Class(name="ProvisionRequestQuotaVerification",
namespace=automate.Namespace.make_path("Infrastructure", "VM",
"Provisioning", "StateMachines",
parent=domain, create_on_init=True))
tcls.create()
request.addfinalizer(lambda: tcls.delete() if tcls.exists() else None)
return tcls
@pytest.fixture(scope="module")
def copy_methods(domain):
methods = ['rejected', 'validate_quotas']
for method in methods:
ocls = automate.Class(name="ProvisionRequestQuotaVerification",
namespace=automate.Namespace.make_path("Infrastructure", "VM",
"Provisioning", "StateMachines",
parent=automate.Domain(name="ManageIQ (Locked)")))
method = automate.Method(name=method, cls=ocls)
method = method.copy_to(domain)
@pytest.fixture(scope="module")
def set_domain_priority(domain):
automate.set_domain_order(domain.name)
@pytest.yield_fixture(scope="module")
def set_group_memory():
group = ac.Group(description='EvmGroup-super_administrator')
group.edit_tags("Quota - Max Memory *", '2GB')
yield
group.remove_tag("Quota - Max Memory *", "2GB")
@pytest.yield_fixture(scope="module")
def set_group_cpu():
group = ac.Group(description='EvmGroup-super_administrator')
group.edit_tags("Quota - Max CPUs *", '2')
yield
group.remove_tag("Quota - Max CPUs *", "2")
@pytest.fixture(scope="function")
def prov_data(provider, provisioning):
return {
"first_name": fauxfactory.gen_alphanumeric(),
"last_name": fauxfactory.gen_alphanumeric(),
"email": "{}@{}.test".format(
fauxfactory.gen_alphanumeric(), fauxfactory.gen_alphanumeric()),
"manager_name": "{} {}".format(
fauxfactory.gen_alphanumeric(), fauxfactory.gen_alphanumeric()),
"vlan": provisioning.get("vlan", None),
"datastore_name": {"name": provisioning["datastore"]},
"host_name": {"name": provisioning["host"]},
"provision_type": "Native Clone" if provider.type == "rhevm" else "VMware"
}
@pytest.fixture(scope="function")
def template_name(provisioning):
return provisioning["template"]
@pytest.fixture(scope="function")
def provisioner(request, setup_provider, provider):
def _provisioner(template, provisioning_data, delayed=None):
sel.force_navigate('infrastructure_provision_vms', context={
'provider': provider,
'template_name': template,
})
fill(provisioning_form, provisioning_data, action=provisioning_form.submit_button)
flash.assert_no_errors()
return _provisioner
@pytest.mark.uncollectif(lambda: version.current_version() >= '5.5')
def test_group_quota_max_memory_check_by_tagging(
provisioner, prov_data, template_name, provider, request, vm_name, set_group_memory, bug):
""" Test group Quota-Max Memory by tagging.
    Prerequisites:
* A provider set up, supporting provisioning in CFME
Steps:
* Set the group quota for memory by tagging
* Open the provisioning dialog.
        * Apart from the usual provisioning settings, set RAM greater than the group memory quota.
* Submit the provisioning request and wait for it to finish.
        * Visit the requests page. The last message should state the quota validation message.
Metadata:
test_flag: provision
"""
note = ('template {} to vm {} on provider {}'.format(template_name, vm_name, provider.key))
prov_data["vm_name"] = vm_name
prov_data["memory"] = "4096"
prov_data["notes"] = note
provisioner(template_name, prov_data)
# nav to requests page to check quota validation
row_description = 'Provision from [{}] to [{}]'.format(template_name, vm_name)
cells = {'Description': row_description}
row, __ = wait_for(requests.wait_for_request, [cells, True],
fail_func=requests.reload, num_sec=300, delay=20)
if version.current_version() >= "5.4":
assert row.last_message.text == 'Request denied due to the following quota limits:'\
'(Group Allocated Memory 0.00GB + Requested 4.00GB > Quota 2.00GB)'
else:
assert row.last_message.text == 'Request denied due to the following quota limits:'\
'(Group Allocated Memory 0.00GB + Requested 4.00GB \> Quota 2.00GB)'
@pytest.mark.uncollectif(lambda: version.current_version() >= '5.5')
def test_group_quota_max_cpu_check_by_tagging(
provisioner, prov_data, template_name, provider, request, vm_name, set_group_cpu, bug):
""" Test group Quota-Max CPU by tagging.
    Prerequisites:
* A provider set up, supporting provisioning in CFME
Steps:
* Set the group quota for cpu by tagging
* Open the provisioning dialog.
        * Apart from the usual provisioning settings, set CPU greater than the group cpu quota.
* Submit the provisioning request and wait for it to finish.
        * Visit the requests page. The last message should state the quota validation message.
Metadata:
test_flag: provision
"""
note = ('template {} to vm {} on provider {}'.format(template_name, vm_name, provider.key))
prov_data["vm_name"] = vm_name
prov_data["num_sockets"] = "8"
prov_data["notes"] = note
provisioner(template_name, prov_data)
# nav to requests page to check quota validation
row_description = 'Provision from [{}] to [{}]'.format(template_name, vm_name)
cells = {'Description': row_description}
row, __ = wait_for(requests.wait_for_request, [cells],
fail_func=sel.refresh, num_sec=300, delay=20)
if version.current_version() >= "5.4":
assert row.last_message.text == 'Request denied due to the following quota limits:'\
'(Group Allocated vCPUs 0 + Requested 8 > Quota 2)'
else:
assert row.last_message.text == 'Request denied due to the following quota limits:'\
'(Group Allocated vCPUs 0 + Requested 8 \> Quota 2)'
@pytest.mark.tier(1)
@pytest.mark.meta(blockers=[1367290])
def test_tenant_quota_max_cpu_check(
provisioner, prov_data, template_name, provider, request, vm_name, bug):
""" Test Tenant Quota-Max CPU by UI.
    Prerequisites:
* A provider set up, supporting provisioning in CFME
Steps:
        * Set the tenant quota for cpu by UI enforcement
* Open the provisioning dialog.
        * Apart from the usual provisioning settings, set CPU greater than the tenant cpu quota.
* Submit the provisioning request and wait for it to finish.
        * Visit the requests page. The last message should state the quota validation message.
Metadata:
test_flag: provision
"""
cpu_data = {'cpu_cb': True, 'cpu': 2}
roottenant = Tenant.get_root_tenant()
roottenant.set_quota(**cpu_data)
note = ('template {} to vm {} on provider {}'.format(template_name, vm_name, provider.key))
prov_data["vm_name"] = vm_name
prov_data["num_sockets"] = "8"
prov_data["notes"] = note
provisioner(template_name, prov_data)
# nav to requests page to check quota validation
row_description = 'Provision from [{}] to [{}]'.format(template_name, vm_name)
cells = {'Description': row_description}
row, __ = wait_for(requests.wait_for_request, [cells],
fail_func=sel.refresh, num_sec=500, delay=20)
# BUG - https://bugzilla.redhat.com/show_bug.cgi?id=1364381
# TODO: update assert message once the above bug is fixed.
# assert row.last_message.text == 'Request exceeds maximum allowed for the following: \
# (cpu - Used: 526 plus requested: 8 exceeds quota: 3))'
assert row.reason.text == "Quota Exceeded"
|
kzvyahin/cfme_tests
|
cfme/tests/infrastructure/test_infra_quota.py
|
Python
|
gpl-2.0
| 9,036
|
[
"VisIt"
] |
958ff4452040f4e30e42ce0ef447db32822e44f462166e59e771eff3bae2adf6
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np
import warnings
try:
import horton
except:
warnings.warn("Running without support for horton", RuntimeWarning)
import h5py
from .utilities import load_qmfiles, number2name, name2number, angstrom2bohr, bohr2angstrom, load_json, vdw_radii, load_geometry_from_molden
from numba import jit
import os
import sh
class structure(object):
"""
A structure depends on horton for loading QM file data
and calculating ESP on a grid.
We define the grid-points on which to calculate ESP, as
well as pre-calculated arrays of distances
"""
def __init__(self, vdw_grid_rmin=1.4, vdw_grid_rmax=2.0, vdw_grid_pointdensity=2.0, vdw_grid_nsurfaces=2):
self.vdw_grid_rmin = vdw_grid_rmin
self.vdw_grid_rmax = vdw_grid_rmax
self.vdw_grid_pointdensity = vdw_grid_pointdensity
self.vdw_grid_nsurfaces = vdw_grid_nsurfaces
self.dm = None
def load_qm(self, filename, field=np.zeros(3, dtype=np.float64)):
IO = horton.IOData.from_file(filename)
self.coordinates = IO.coordinates
self.numbers = IO.numbers
self.dm = IO.get_dm_full()
self.obasis = IO.obasis
self.natoms = len(self.numbers)
self.fchkname = filename
self.field = field
    def load_esp_terachem(self, terachem_scrdir, field=np.zeros(3, dtype=np.float64)):
        self.field = field  # store the external field, as load_qm and load_esp_orca do
        #we really only need to provide coordinates, grid_points, (external field), and ESP
#from terachem manual:
#scr/esp.xyz – The ESP grid points in Å, together with the ESP on that point. Each row stands for
# one grid point.
        # Column 1: The element type of the atom that the grid point originates from.
# Column 2-4: coordinates of the grid point
# Column 5: Electrostatic potential (ESP) on that grid point.
        # Column 6: The index of the atom that the grid point originates from. Order of the index is
# the same as the molecule in the input deck.
# When we use software to visualize this xyz file, only data in the first 4 columns is read by the software,
        # though sometimes the 5th column can also be recognized and shown in labels (Molden).
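        # A hypothetical esp.xyz data line (after the two header rows) could read
        #   H     1.234567   -0.345678    2.345678    0.012345    1
        # so the [:,1:5] slice below keeps x, y, z (in Angstrom) and the ESP value,
        # dropping the element symbol and the originating-atom index.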
esp_data = np.loadtxt(terachem_scrdir + '/esp.xyz', skiprows=2, dtype=str)[:,1:5].astype(np.float64)
self.grid = esp_data[:,0:3] * angstrom2bohr
self.esp_grid_qm = esp_data[:,3] #quite sure this is in hartree
self.ngridpoints = self.esp_grid_qm.shape[0]
#we assume xyz is in angstrom and convert to bohr
molden_filename = terachem_scrdir + '/' + [name for name in os.listdir(terachem_scrdir) if '.molden' in name][0]
self.coordinates, elements = load_geometry_from_molden(molden_filename)
self.numbers = np.array([name2number[el] for el in elements], dtype=np.int64)
self.natoms = self.coordinates.shape[0]
def load_esp_orca(self, gbw_file, density_file, field=np.zeros(3, dtype=np.float64)):
#we generate our own grid and run
# orca_vpot GBWName PName XYZName POTName
# GBWName = GBW file that contains the orbitals/coordinates/basis
# PName = File that contains the density (must match the GBW file basis set!); for HF/DFT ground state jobname.scfp; for tddft jobname.cisp etc...
# XYZName = File that contains the coordinates to evaluate V(r) for
# POTName = Output file with V(r)
self.field = field
#0) read in atomic positions and elements
#convert gbw to molden
gbw_base = ".".join(gbw_file.split(".")[:-1])
sh.orca_2mkl(gbw_base)
sh.orca_2mkl(gbw_base, "-molden")
self.coordinates, elements = load_geometry_from_molden(gbw_base + ".molden.input")
self.numbers = np.array([name2number[el] for el in elements], dtype=np.int64)
self.natoms = len(self.numbers)
#1) Generate grid and write it out to file
grid_file = gbw_base + ".grid"
esp_file = gbw_base + ".esp"
self.compute_grid(rmin=self.vdw_grid_rmin, rmax=self.vdw_grid_rmax, pointdensity=self.vdw_grid_pointdensity, nsurfaces=self.vdw_grid_nsurfaces)
np.savetxt(grid_file, self.grid, header=str(self.grid.shape[0]), comments=" ")
#2) Run orca_vpot to get esp on grid
sh.orca_vpot(gbw_file, density_file, grid_file, esp_file)
#3) Read in esp
#rx, ry, rz, esp(r)
esp = np.loadtxt(esp_file, skiprows=1)[:,3]
self.esp_grid_qm = esp
self.compute_rinvmat()
self.compute_xyzmat()
def compute_grid_surface(self, pointdensity=2.0, radius_scale=1.4):
"""
        Generates approximately uniformly spaced points on a van der Waals
surface of a molecule.
vdw_radii = van der Waals radius of atoms
points = number of points on a sphere around each atom
grid = output points in x, y, z
idx = used to keep track of index in grid, when generating
initial points
density = points per area on a surface
        chkrm = (checkremove) used to keep track of the index when
                removing points
"""
points = np.zeros(self.natoms, dtype=np.int64)
for i in range(self.natoms):
            points[i] = int(pointdensity*4*np.pi*radius_scale*vdw_radii[self.numbers[i]])
# grid = [x, y, z]
grid = np.zeros((np.sum(points), 3), dtype=np.float64)
idx = 0
for i in range(self.natoms):
N = points[i]
#Saff & Kuijlaars algorithm
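            # Sketch of the spiral construction implemented below:
            #   h_k     = -1 + 2k/(N-1)                        for k = 0 .. N-1
            #   theta_k = arccos(h_k)
            #   phi_k   = (phi_{k-1} + 3.6/sqrt(N*(1 - h_k^2))) mod 2*pi,  phi_0 = phi_{N-1} = 0
            # giving N roughly evenly spaced points on a sphere of radius
            # radius_scale * vdw_radii centred on atom i.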
for k in range(N):
h = -1.0 +2.0*k/(N-1)
theta = np.arccos(h)
if k == 0 or k == (N-1):
phi = 0.0
else:
#phi_k phi_{k-1}
phi = ((phi + 3.6/np.sqrt(N*(1-h**2)))) % (2*np.pi)
x = radius_scale*vdw_radii[self.numbers[i]]*np.cos(phi)*np.sin(theta)
y = radius_scale*vdw_radii[self.numbers[i]]*np.sin(phi)*np.sin(theta)
z = radius_scale*vdw_radii[self.numbers[i]]*np.cos(theta)
grid[idx, 0] = x + self.coordinates[i,0]
grid[idx, 1] = y + self.coordinates[i,1]
grid[idx, 2] = z + self.coordinates[i,2]
idx += 1
dist = lambda i,j: np.sqrt(np.sum((i-j)**2))
#This is the distance points have to be apart
#since they are from the same atom
grid_spacing = dist(grid[0,:], grid[1,:])
        # Remove all points lying too close to any atom
not_near_atom = np.ones(grid.shape[0], dtype=bool)
for i in range(self.natoms):
for j in range(grid.shape[0]):
r = dist(grid[j,:], self.coordinates[i,:])
if r < radius_scale*0.99*vdw_radii[self.numbers[i]]:
not_near_atom[j] = False
grid = grid[not_near_atom]
# Double loop over grid to remove close lying points
not_overlapping = np.ones(grid.shape[0], dtype=bool)
for i in range(grid.shape[0]):
for j in range(i+1, grid.shape[0]):
if (not not_overlapping[j]): continue #already marked for removal
r = dist(grid[i,:], grid[j,:])
if 0.90 * grid_spacing > r:
not_overlapping[j] = False
grid = grid[not_overlapping]
return grid
def compute_grid(self, rmin=1.4, rmax=2.0, pointdensity=1.0, nsurfaces=2):
print(rmin, rmax, pointdensity, nsurfaces)
radii = np.linspace(rmin, rmax, nsurfaces)
surfaces = []
for r in radii:
print(r)
surfaces.append(self.compute_grid_surface(pointdensity=pointdensity, radius_scale=r))
for s in surfaces:
print(len(s))
self.grid = np.concatenate(surfaces)
self.ngridpoints = len(self.grid)
def compute_rinvmat(self):
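        # Broadcasting note: coordinates has shape (natoms, 3) and grid has shape
        # (ngridpoints, 3), so the difference below is (natoms, ngridpoints, 3) and
        # rinvmat[a, p] = 1 / |R_a - r_p| (atomic units).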
self.rinvmat = 1./np.sqrt(np.sum((self.coordinates[:,np.newaxis,:] - self.grid[np.newaxis,:,:])**2, axis=2))
def compute_xyzmat(self):
self.xyzmat = self.coordinates[:,np.newaxis,:] - self.grid[np.newaxis,:,:]
def compute_qm_esp(self):
esp_grid_qm = self.obasis.compute_grid_esp_dm(self.dm, self.coordinates, self.numbers.astype(float), self.grid)
self.esp_grid_qm = esp_grid_qm
def compute_all(self):
self.compute_grid()
self.compute_rinvmat()
self.compute_qm_esp()
def write_xyz(self, filename):
with open(filename, "w") as f:
f.write("{}\n\n".format(self.natoms))
for i in range(self.natoms):
atomname = number2name[self.numbers[i]]
f.write("{} {: .10f} {: .10f} {: .10f}\n".format(atomname, self.coordinates[i,0]*bohr2angstrom, self.coordinates[i,1]*bohr2angstrom,self.coordinates[i,2]*bohr2angstrom))
def write_grid(self, filename):
with open(filename, "w") as f:
f.write("{}\n\n".format(self.ngridpoints))
for i in range(self.ngridpoints):
atomname = 'H'
f.write("{} {: .10f} {: .10f} {: .10f}\n".format(atomname, self.grid[i,0]*bohr2angstrom, self.grid[i,1]*bohr2angstrom,self.grid[i,2]*bohr2angstrom))
def save_h5(self, filename):
"""
Save important arrays
on disk
"""
f = h5py.File(filename, "w")
f.create_dataset("coordinates", data=self.coordinates)
f.create_dataset("numbers", data=self.numbers)
f.create_dataset("natoms", data=self.natoms)
f.create_dataset("field", data=self.field)
f.create_dataset("xyzmat", data=self.xyzmat)
f.create_dataset("rinvmat", data=self.rinvmat)
f.create_dataset("esp_grid_qm", data=self.esp_grid_qm)
f.close()
def load_h5(self, filename):
f = h5py.File(filename, "r")
self.coordinates = f["coordinates"][()]
self.numbers = f["numbers"][()]
self.natoms = f["natoms"][()]
self.field = f["field"][()]
self.xyzmat = f["xyzmat"][()]
self.rinvmat = f["rinvmat"][()]
self.esp_grid_qm = f["esp_grid_qm"][()]
f.close()
class fragment(object):
def __init__(self, fragdict):
self.atomindices = np.array(fragdict["atomindices"],dtype=np.int64) - 1
self.atomnames = fragdict["atomnames"]
self.qtot = fragdict["qtot"]
self.symmetries = [list(np.array(x, dtype=np.int64) - 1) for x in fragdict["symmetries"]]
self.fullsymmetries = []
self.natoms = len(self.atomindices)
self.symmetryidx = np.copy(self.atomindices)
        self.nparametersq = 0
        self.nparametersa = 0
self.lastidx = self.atomindices[-1]
self.lastidxissym = False
self.lastidxnsym = 1 #standard, no symmetry on last atom
self.lastidxsym = [self.lastidx]
self.startguess_charge = fragdict["startguess_charge"]
self.startguess_polarizability = fragdict["startguess_polarizability"]
for iloc, idx in enumerate(self.symmetryidx):
for sym in self.symmetries:
if idx in sym:
self.symmetryidx[iloc] = sym[0]
if idx == self.lastidx:
self.lastidxissym = True
self.lastidxsym = sym
self.lastidxnsym = len(sym)
self.fullsymmetries = []
for idx in self.atomindices:
counted = False
for sym in self.fullsymmetries:
if idx in sym:
counted = True
if counted:
continue
insym = False
for sym in self.symmetries:
if idx in sym:
insym = True
break
if insym:
self.fullsymmetries.append(sym)
else:
self.fullsymmetries.append([idx])
        # the number of free parameters is less than the total atom count
        # due to symmetries
nsymp = 0
for sym in self.symmetries:
nsymp += len(sym) - 1
#Np = Na - nsym - (sum constraint)
self.nparametersq = self.natoms - nsymp - 1
#for isotropic polarizability, there is no constraint on
# the sum
self.nparametersa = self.natoms - nsymp
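        # Hypothetical worked example: a 4-atom fragment with one 2-atom symmetry
        # group has nsymp = 1, so nparametersq = 4 - 1 - 1 = 2 free charges (the
        # last charge follows from the total-charge constraint) and
        # nparametersa = 4 - 1 = 3 free isotropic polarizabilities.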
class constraints(object):
def __init__(self, filename):
data = load_json(filename)
self.filename = filename
self.name = data["name"]
self.restraint = 0.0
self.nfragments = len(data["fragments"])
self.fragments = []
self.qtot = 0.0
self.natoms = 0
self.nparametersq = 0
self.nparametersa = 0
q_red = []
a_red = []
indices = []
for i in range(self.nfragments):
frag = fragment(data["fragments"][i])
self.qtot += frag.qtot
self.natoms += frag.natoms
self.nparametersq += frag.nparametersq
self.nparametersa += frag.nparametersa
q_red += frag.startguess_charge #redundant start guesses
a_red += frag.startguess_polarizability #redundant start guesses
self.fragments.append(frag)
#get non-redundant start guess
#1) remove (symmetry) indices from end
indices = []
for frag in self.fragments:
for sym in frag.fullsymmetries[:-1]:
indices.append(sym[0])
q_sym = 0.0
for member in sym:
q_sym += q_red[member]
q_sym = q_sym / len(sym)
for member in sym:
q_red[member] = q_sym
q_red = np.array(q_red, dtype=np.float64)
self.q0 = np.zeros(self.nparametersq, dtype=np.float64)
for i, index in enumerate(indices):
self.q0[i] = q_red[index]
#same, but for polarizability.
#there is no constraint on the total polarizability, just do the symmetry part
indices = []
if a_red:
for frag in self.fragments:
for sym in frag.fullsymmetries[:]:
indices.append(sym[0])
a_sym = 0.0
for member in sym:
a_sym += a_red[member]
a_sym = a_sym / len(sym)
for member in sym:
a_red[member] = a_sym
a_red = np.array(a_red, dtype=np.float64)
self.a0 = np.zeros(self.nparametersa, dtype=np.float64)
for i, index in enumerate(indices):
self.a0[i] = a_red[index]
def expand_q(self, qcompressed):
qout = np.zeros(self.natoms, dtype=np.float64)
pcounter = 0
for frag in self.fragments:
qcur = 0.0
for sym in frag.fullsymmetries[:-1]:
for idx in sym:
qout[idx] = qcompressed[pcounter]
qcur += qout[idx]
pcounter += 1
#charge constraint. lastidxnsym is 1 if the last one is not a part of a symmetry
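            # Hypothetical worked case: with frag.qtot = 0.0, already-assigned charges
            # summing to qcur = 0.3, and a final symmetry group of two atoms, each of
            # those two atoms receives (0.0 - 0.3) / 2 = -0.15.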
qlast = (frag.qtot - qcur) / len(frag.fullsymmetries[-1])
for idx in frag.fullsymmetries[-1]:
qout[idx] = qlast
return qout
def expand_a(self, acompressed):
aout = np.zeros((self.natoms,3,3), dtype=np.float64)
pcounter = 0
for frag in self.fragments:
for sym in frag.fullsymmetries[:]:
for idx in sym:
aout[idx,0,0] = acompressed[pcounter]
aout[idx,1,1] = acompressed[pcounter]
aout[idx,2,2] = acompressed[pcounter]
pcounter += 1
return aout
|
peter-reinholdt/propertyfit
|
propertyfit/structures.py
|
Python
|
gpl-3.0
| 16,394
|
[
"TeraChem"
] |
2ef82abf60e59a8ac5627cc5a4dac9f0b8bd418938f3f9e020cc42d69517d1cf
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
r'''
Applying creation or annihilation operators on FCI wavefunction
a |0>
Compute density matrices by
gamma_{ij} = <0| i^+ j |0>
Gamma_{ij,kl} = <0| i^+ j^+ l k |0>
'''
import numpy
from pyscf import gto, scf, fci
mol = gto.M(atom='H 0 0 0; Li 0 0 1.1', basis='sto3g')
m = scf.RHF(mol).run()
fs = fci.FCI(mol, m.mo_coeff)
e, c = fs.kernel()
norb = m.mo_energy.size
neleca = nelecb = mol.nelectron // 2
#
# Spin-free 1-particle density matrix
# <Psi| a_{i\alpha}^\dagger a_{j\alpha} + a_{i\beta}^\dagger a_{j\beta} |Psi>
#
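# Each dm1[i, j] below is assembled by applying the annihilation operator for
# orbital j and then the creation operator for orbital i to |0>, and taking the
# overlap with |0>; the alpha and beta contributions are summed.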
dm1 = numpy.zeros((norb,norb))
for i in range(norb):
for j in range(norb):
tmp = fci.addons.des_a(c , norb, (neleca ,nelecb), j)
tmp = fci.addons.cre_a(tmp, norb, (neleca-1,nelecb), i)
dm1[i,j] += numpy.dot(tmp.flatten(), c.flatten())
tmp = fci.addons.des_b(c , norb, (neleca,nelecb ), j)
tmp = fci.addons.cre_b(tmp, norb, (neleca,nelecb-1), i)
dm1[i,j] += numpy.dot(tmp.flatten(), c.flatten())
#
# Note the swap of k and l indices for 2-PDM
# tmp = i^+ j^+ l k |0>
#
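# Reading the loops below: <0| i^+ j^+ l k |0> is built by applying the operators
# right to left -- annihilate k, annihilate l, create j, create i -- and the
# (neleca, nelecb) tuples passed to des_*/cre_* give the electron count of each
# intermediate state (e.g. for the aaaa block with neleca = 2: 2 -> 1 -> 0 -> 1 -> 2).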
dm2aaaa = numpy.zeros((norb,norb,norb,norb))
dm2abab = numpy.zeros((norb,norb,norb,norb))
dm2bbbb = numpy.zeros((norb,norb,norb,norb))
for i in range(norb):
for j in range(norb):
for k in range(norb):
for l in range(norb):
tmp = fci.addons.des_a(c , norb, (neleca ,nelecb), k)
tmp = fci.addons.des_a(tmp, norb, (neleca-1,nelecb), l)
tmp = fci.addons.cre_a(tmp, norb, (neleca-2,nelecb), j)
tmp = fci.addons.cre_a(tmp, norb, (neleca-1,nelecb), i)
dm2aaaa[i,j,k,l] += numpy.dot(tmp.flatten(), c.flatten())
tmp = fci.addons.des_a(c , norb, (neleca ,nelecb ), k)
tmp = fci.addons.des_b(tmp, norb, (neleca-1,nelecb ), l)
tmp = fci.addons.cre_b(tmp, norb, (neleca-1,nelecb-1), j)
tmp = fci.addons.cre_a(tmp, norb, (neleca-1,nelecb ), i)
dm2abab[i,j,k,l] += numpy.dot(tmp.flatten(), c.flatten())
tmp = fci.addons.des_b(c , norb, (neleca,nelecb ), k)
tmp = fci.addons.des_b(tmp, norb, (neleca,nelecb-1), l)
tmp = fci.addons.cre_b(tmp, norb, (neleca,nelecb-2), j)
tmp = fci.addons.cre_b(tmp, norb, (neleca,nelecb-1), i)
dm2bbbb[i,j,k,l] += numpy.dot(tmp.flatten(), c.flatten())
ref1 = fs.make_rdm1(c, norb, (neleca,nelecb))
ref2aaaa, ref2aabb, ref2bbbb = fs.make_rdm12s(c, norb, (neleca,nelecb))[1]
print('Error in spin-free 1-PDM %g' % numpy.linalg.norm(ref1-dm1))
print('Error in 2-PDM aaaa %g' % numpy.linalg.norm(ref2aaaa.transpose(0,2,1,3)-dm2aaaa))
print('Error in 2-PDM aabb %g' % numpy.linalg.norm(ref2aabb.transpose(0,2,1,3)-dm2abab))
print('Error in 2-PDM bbbb %g' % numpy.linalg.norm(ref2bbbb.transpose(0,2,1,3)-dm2bbbb))
|
gkc1000/pyscf
|
examples/fci/31-apply_2nd_quantized_op.py
|
Python
|
apache-2.0
| 2,959
|
[
"PySCF"
] |
879600639ce2e08ef0778d0797d27f86acfca088e973e895831c8a981327e2e3
|