| text (stringlengths 12-1.05M) | repo_name (stringlengths 5-86) | path (stringlengths 4-191) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 12-1.05M) | keyword (listlengths 1-23) | text_hash (stringlengths 64) |
|---|---|---|---|---|---|---|---|
#-------------------------------------------------------------------------------
# Copyright (c) 2012 Gael Honorez.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Public License v3.0
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/gpl.html
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#-------------------------------------------------------------------------------
from Factor import Factor
class Schedule(object):
def __init__(self, name):
self._name = name
def visit(self, depth = -1, maxDepth = 0) :
pass
#
def __str__(self) :
return self._name
class ScheduleStep(Schedule) :
def __init__(self, name, factor, index):
super(ScheduleStep, self).__init__(name)
self._factor = factor
self._index = index
def visit(self, depth = -1, maxDepth = 0) :
# print "Schedule Step : " + self._name
currentFactor = self._factor
delta = currentFactor.updateMessageIndex(self._index)
return delta
class ScheduleSequence(Schedule) :
def __init__(self, name, schedules) :
super(ScheduleSequence, self).__init__(name)
self._schedules = schedules
def visit(self, depth = -1, maxDepth = 0) :
maxDelta = 0
schedules = self._schedules
for currentSchedule in schedules :
currentVisit = currentSchedule.visit(depth + 1, maxDepth)
maxDelta = max(currentVisit, maxDelta)
return maxDelta
class ScheduleLoop(Schedule) :
def __init__(self, name, scheduleToLoop, maxDelta) :
super(ScheduleLoop, self).__init__(name)
self._scheduleToLoop = scheduleToLoop
self._maxDelta = maxDelta
def visit(self, depth = -1, maxDepth = 0) :
totalIterations = 1
delta = self._scheduleToLoop.visit(depth + 1, maxDepth)
while delta > self._maxDelta :
if totalIterations > 1000 : break
delta = self._scheduleToLoop.visit(depth + 1, maxDepth)
totalIterations = totalIterations + 1
return delta
| IDragonfire/modular-client | src/trueSkill/FactorGraphs/Schedule.py | Python | gpl-3.0 | 2,765 | ["VisIt"] | 1cec2553b96ee81a93d1642655b8e62e3a8b4af5c3a2dcc8e250eb89558fd6f0 |
# Copyright (c) 2012-2014, James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .posterior import Posterior
from ...util.linalg import jitchol, tdot, dtrtrs, dpotri, pdinv
import numpy as np
from . import LatentFunctionInference
log_2_pi = np.log(2*np.pi)
class DTC(LatentFunctionInference):
"""
An object for inference when the likelihood is Gaussian, but we want to do sparse inference.
The function self.inference returns a Posterior object, which summarizes
the posterior.
NB. It's not recommended to use this function! It's here for historical purposes.
"""
def __init__(self):
self.const_jitter = 1e-6
def inference(self, kern, X, Z, likelihood, Y, mean_function=None, X_variance=None, Y_metadata=None):
assert mean_function is None, "inference with a mean function not implemented"
assert X_variance is None, "cannot use X_variance with DTC. Try varDTC."
num_inducing, _ = Z.shape
num_data, output_dim = Y.shape
#make sure the noise is not hetero
precision = 1./likelihood.gaussian_variance(Y_metadata)
if precision.size > 1:
raise NotImplementedError("no hetero noise with this implementation of DTC")
Kmm = kern.K(Z)
Knn = kern.Kdiag(X)
Knm = kern.K(X, Z)
U = Knm
Uy = np.dot(U.T,Y)
#factor Kmm
Kmmi, L, Li, _ = pdinv(Kmm)
# Compute A
LiUTbeta = np.dot(Li, U.T)*np.sqrt(precision)
A = tdot(LiUTbeta) + np.eye(num_inducing)
# factor A
LA = jitchol(A)
# back substitute to get b, P, v
tmp, _ = dtrtrs(L, Uy, lower=1)
b, _ = dtrtrs(LA, tmp*precision, lower=1)
tmp, _ = dtrtrs(LA, b, lower=1, trans=1)
v, _ = dtrtrs(L, tmp, lower=1, trans=1)
tmp, _ = dtrtrs(LA, Li, lower=1, trans=0)
P = tdot(tmp.T)
#compute log marginal
log_marginal = -0.5*num_data*output_dim*np.log(2*np.pi) + \
-np.sum(np.log(np.diag(LA)))*output_dim + \
0.5*num_data*output_dim*np.log(precision) + \
-0.5*precision*np.sum(np.square(Y)) + \
0.5*np.sum(np.square(b))
# Compute dL_dKmm
vvT_P = tdot(v.reshape(-1,1)) + P
dL_dK = 0.5*(Kmmi - vvT_P)
# Compute dL_dU
vY = np.dot(v.reshape(-1,1),Y.T)
dL_dU = vY - np.dot(vvT_P, U.T)
dL_dU *= precision
#compute dL_dR
Uv = np.dot(U, v)
dL_dR = 0.5*(np.sum(U*np.dot(U,P), 1) - 1./precision + np.sum(np.square(Y), 1) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1))*precision**2
dL_dthetaL = likelihood.exact_inference_gradients(dL_dR)
grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':np.zeros_like(Knn), 'dL_dKnm':dL_dU.T, 'dL_dthetaL':dL_dthetaL}
#construct a posterior object
post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=L)
return post, log_marginal, grad_dict
class vDTC(object):
def __init__(self):
self.const_jitter = 1e-6
def inference(self, kern, X, Z, likelihood, Y, mean_function=None, X_variance=None, Y_metadata=None):
assert mean_function is None, "inference with a mean function not implemented"
assert X_variance is None, "cannot use X_variance with DTC. Try varDTC."
num_inducing, _ = Z.shape
num_data, output_dim = Y.shape
#make sure the noise is not hetero
precision = 1./likelihood.gaussian_variance(Y_metadata)
if precision.size > 1:
raise NotImplementedError("no hetero noise with this implementation of DTC")
Kmm = kern.K(Z)
Knn = kern.Kdiag(X)
Knm = kern.K(X, Z)
U = Knm
Uy = np.dot(U.T,Y)
#factor Kmm
Kmmi, L, Li, _ = pdinv(Kmm)
# Compute A
LiUTbeta = np.dot(Li, U.T)*np.sqrt(precision)
A_ = tdot(LiUTbeta)
trace_term = -0.5*(np.sum(Knn)*precision - np.trace(A_))
A = A_ + np.eye(num_inducing)
# factor A
LA = jitchol(A)
# back substitute to get b, P, v
tmp, _ = dtrtrs(L, Uy, lower=1)
b, _ = dtrtrs(LA, tmp*precision, lower=1)
tmp, _ = dtrtrs(LA, b, lower=1, trans=1)
v, _ = dtrtrs(L, tmp, lower=1, trans=1)
tmp, _ = dtrtrs(LA, Li, lower=1, trans=0)
P = tdot(tmp.T)
#compute log marginal
log_marginal = -0.5*num_data*output_dim*np.log(2*np.pi) + \
-np.sum(np.log(np.diag(LA)))*output_dim + \
0.5*num_data*output_dim*np.log(precision) + \
-0.5*precision*np.sum(np.square(Y)) + \
0.5*np.sum(np.square(b)) + \
trace_term
# Compute dL_dKmm
vvT_P = tdot(v.reshape(-1,1)) + P
LAL = Li.T.dot(A).dot(Li)
dL_dK = Kmmi - 0.5*(vvT_P + LAL)
# Compute dL_dU
vY = np.dot(v.reshape(-1,1),Y.T)
#dL_dU = vY - np.dot(vvT_P, U.T)
dL_dU = vY - np.dot(vvT_P - Kmmi, U.T)
dL_dU *= precision
#compute dL_dR
Uv = np.dot(U, v)
dL_dR = 0.5*(np.sum(U*np.dot(U,P), 1) - 1./precision + np.sum(np.square(Y), 1) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1) )*precision**2
dL_dR -=precision*trace_term/num_data
dL_dthetaL = likelihood.exact_inference_gradients(dL_dR)
grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':np.zeros_like(Knn) + -0.5*precision, 'dL_dKnm':dL_dU.T, 'dL_dthetaL':dL_dthetaL}
#construct a posterior object
post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=L)
return post, log_marginal, grad_dict
| befelix/GPy | GPy/inference/latent_function_inference/dtc.py | Python | bsd-3-clause | 5,766 | ["Gaussian"] | 9974cbc02a03f6b902c7419cf06f198b63d3e9607cfc97eddb4012a111c349b2 |
# -*- coding: utf-8 -*-
#
# test_stdp_dopa.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# Begin Documentation
# Name: testsuite::test_stdp_dopa - script to test stdp_dopamine_synapse model implementing dopamine-dependent spike-timing dependent plasticity as defined in [1], based on [2].
# Two neurons, which fire Poisson-like, are connected by a stdp_dopamine_synapse. Dopamine is released by a third neuron, which also fires Poisson-like.
#
# author: Wiebke Potjans
# date: October 2010
import numpy as np
import nest
nest.ResetKernel()
nest.SetKernelStatus({'overwrite_files': True}) # set to True to permit overwriting
delay = 1. # the delay in ms
w_ex = 45.
g = 3.83
w_in = -w_ex * g
K = 10000
f_ex = 0.8
K_ex = f_ex * K
K_in = (1.0 - f_ex) * K
nu_ex = 10.0#2.
nu_in = 10.0#2.
pg_ex = nest.Create("poisson_generator")
nest.SetStatus(pg_ex, {"rate": K_ex * nu_ex})
pg_in = nest.Create("poisson_generator")
nest.SetStatus(pg_in, {"rate": K_in * nu_in})
sd = nest.Create("spike_detector")
nest.SetStatus([sd], [ {
"label": "spikes",
"withtime": True,
"withgid": True,
"to_file": True,
} ])
neuron1 = nest.Create("iaf_psc_alpha")
neuron2 = nest.Create("iaf_psc_alpha")
dopa_neuron = nest.Create("iaf_psc_alpha")
nest.SetStatus(neuron1, {"tau_syn_ex": 0.3, "tau_syn_in": 0.3, "tau_minus": 20.0})
nest.SetStatus(neuron2, {"tau_syn_ex": 0.3, "tau_syn_in": 0.3, "tau_minus": 20.0})
vt = nest.Create("volume_transmitter")
nest.Connect(pg_ex, neuron1, params=w_ex, delay=delay)
nest.Connect(pg_ex, neuron2, params=w_ex, delay=delay)
nest.Connect(pg_ex, dopa_neuron, params=w_ex, delay=delay)
nest.Connect(pg_in, neuron1, params=w_in, delay=delay)
nest.Connect(pg_in, neuron2, params=w_in, delay=delay)
nest.Connect(pg_in, dopa_neuron, params=w_in, delay=delay)
nest.Connect(neuron1, sd)
nest.Connect(neuron2, sd)
nest.Connect(dopa_neuron, sd)
nest.CopyModel("stdp_dopamine_synapse", "dopa", {"vt": vt[0], "weight": 35., "delay": delay})
nest.CopyModel("static_synapse", "static", {"delay": delay})
nest.Connect(dopa_neuron, vt, model="static")
nest.Connect(neuron1, neuron2, model="dopa")
if nest.GetStatus(neuron2)[0]['local']:
filename = 'weight.gdf'
fname = open(filename, 'w')
else:
raise
T = 1000.0
dt = 10.0
weight = None
for t in np.arange(0, T + dt, dt):
if nest.GetStatus(neuron2)[0]['local']:
weight = nest.GetStatus(nest.FindConnections(neuron1, synapse_model="dopa"))[0]['weight']
print(weight)
weightstr = str(weight)
timestr = str(t)
data = timestr + ' ' + weightstr + '\n'
fname.write(data)
nest.Simulate(dt)
if nest.GetStatus(neuron2)[0]['local']:
print("expected weight at T=1000 ms: 28.6125 pA")
print("weight at last event: " + str(weight) + " pA")
fname.close()
| kristoforcarlson/nest-simulator-fork | testsuite/manualtests/test_stdp_dopa.py | Python | gpl-2.0 | 3,451 | ["NEURON"] | bb958e099754ee901e9905f27d1b6c3a3ecf5979100fcd8abb885fdd050d7a03 |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import io
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
from skbio import TreeNode
from skbio.diversity._util import (_validate_counts_vector,
_validate_counts_matrix,
_validate_otu_ids_and_tree,
_vectorize_counts_and_tree)
from skbio.tree import DuplicateNodeError, MissingNodeError
class ValidationTests(TestCase):
def test_validate_counts_vector(self):
# python list
obs = _validate_counts_vector([0, 2, 1, 3])
npt.assert_array_equal(obs, np.array([0, 2, 1, 3]))
self.assertEqual(obs.dtype, int)
# numpy array (no copy made)
data = np.array([0, 2, 1, 3])
obs = _validate_counts_vector(data)
npt.assert_array_equal(obs, data)
self.assertEqual(obs.dtype, int)
self.assertTrue(obs is data)
# single element
obs = _validate_counts_vector([42])
npt.assert_array_equal(obs, np.array([42]))
self.assertEqual(obs.dtype, int)
self.assertEqual(obs.shape, (1,))
# suppress casting to int
obs = _validate_counts_vector([42.2, 42.1, 0], suppress_cast=True)
npt.assert_array_equal(obs, np.array([42.2, 42.1, 0]))
self.assertEqual(obs.dtype, float)
# all zeros
obs = _validate_counts_vector([0, 0, 0])
npt.assert_array_equal(obs, np.array([0, 0, 0]))
self.assertEqual(obs.dtype, int)
# all zeros (single value)
obs = _validate_counts_vector([0])
npt.assert_array_equal(obs, np.array([0]))
self.assertEqual(obs.dtype, int)
def test_validate_counts_vector_invalid_input(self):
# wrong dtype
with self.assertRaises(TypeError):
_validate_counts_vector([0, 2, 1.2, 3])
# wrong number of dimensions (2-D)
with self.assertRaises(ValueError):
_validate_counts_vector([[0, 2, 1, 3], [4, 5, 6, 7]])
# wrong number of dimensions (scalar)
with self.assertRaises(ValueError):
_validate_counts_vector(1)
# negative values
with self.assertRaises(ValueError):
_validate_counts_vector([0, 0, 2, -1, 3])
def test_validate_counts_matrix(self):
# basic valid input (n=2)
obs = _validate_counts_matrix([[0, 1, 1, 0, 2],
[0, 0, 2, 1, 3]])
npt.assert_array_equal(obs[0], np.array([0, 1, 1, 0, 2]))
npt.assert_array_equal(obs[1], np.array([0, 0, 2, 1, 3]))
# basic valid input (n=3)
obs = _validate_counts_matrix([[0, 1, 1, 0, 2],
[0, 0, 2, 1, 3],
[1, 1, 1, 1, 1]])
npt.assert_array_equal(obs[0], np.array([0, 1, 1, 0, 2]))
npt.assert_array_equal(obs[1], np.array([0, 0, 2, 1, 3]))
npt.assert_array_equal(obs[2], np.array([1, 1, 1, 1, 1]))
# empty counts vectors
obs = _validate_counts_matrix(np.array([[], []], dtype=int))
npt.assert_array_equal(obs[0], np.array([]))
npt.assert_array_equal(obs[1], np.array([]))
def test_validate_counts_matrix_suppress_cast(self):
# suppress_cast is passed through to _validate_counts_vector
obs = _validate_counts_matrix(
[[42.2, 42.1, 0], [42.2, 42.1, 1.0]], suppress_cast=True)
npt.assert_array_equal(obs[0], np.array([42.2, 42.1, 0]))
npt.assert_array_equal(obs[1], np.array([42.2, 42.1, 1.0]))
self.assertEqual(obs[0].dtype, float)
self.assertEqual(obs[1].dtype, float)
with self.assertRaises(TypeError):
_validate_counts_matrix([[0.0], [1]], suppress_cast=False)
def test_validate_counts_matrix_negative_counts(self):
with self.assertRaises(ValueError):
_validate_counts_matrix([[0, 1, 1, 0, 2], [0, 0, 2, -1, 3]])
with self.assertRaises(ValueError):
_validate_counts_matrix([[0, 0, 2, -1, 3], [0, 1, 1, 0, 2]])
def test_validate_counts_matrix_unequal_lengths(self):
# len of vectors not equal
with self.assertRaises(ValueError):
_validate_counts_matrix([[0], [0, 0], [9, 8]])
with self.assertRaises(ValueError):
_validate_counts_matrix([[0, 0], [0, 0, 8], [9, 8]])
with self.assertRaises(ValueError):
_validate_counts_matrix([[0, 0, 75], [0, 0, 3], [9, 8, 22, 44]])
def test_validate_otu_ids_and_tree(self):
# basic valid input
t = TreeNode.read(
io.StringIO(
'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 1, 1]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertTrue(_validate_otu_ids_and_tree(counts, otu_ids, t) is None)
# all tips observed
t = TreeNode.read(
io.StringIO(
'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 1, 1, 1, 1]
otu_ids = ['OTU1', 'OTU2', 'OTU3', 'OTU4', 'OTU5']
self.assertTrue(_validate_otu_ids_and_tree(counts, otu_ids, t) is None)
# no tips observed
t = TreeNode.read(
io.StringIO(
'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = []
otu_ids = []
self.assertTrue(_validate_otu_ids_and_tree(counts, otu_ids, t) is None)
# all counts zero
t = TreeNode.read(
io.StringIO(
'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [0, 0, 0, 0, 0]
otu_ids = ['OTU1', 'OTU2', 'OTU3', 'OTU4', 'OTU5']
self.assertTrue(_validate_otu_ids_and_tree(counts, otu_ids, t) is None)
def test_validate_otu_ids_and_tree_invalid_input(self):
# tree has duplicated tip ids
t = TreeNode.read(
io.StringIO(
'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU2:0.75):1.25):0.0)root;'))
counts = [1, 1, 1]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(DuplicateNodeError, _validate_otu_ids_and_tree,
counts, otu_ids, t)
# unrooted tree as input
t = TreeNode.read(io.StringIO('((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
'OTU4:0.7);'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, _validate_otu_ids_and_tree, counts,
otu_ids, t)
# otu_ids has duplicated ids
t = TreeNode.read(
io.StringIO(
'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU2']
self.assertRaises(ValueError, _validate_otu_ids_and_tree, counts,
otu_ids, t)
# len of vectors not equal
t = TreeNode.read(
io.StringIO(
'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, _validate_otu_ids_and_tree, counts,
otu_ids, t)
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2']
self.assertRaises(ValueError, _validate_otu_ids_and_tree, counts,
otu_ids, t)
# tree with no branch lengths
t = TreeNode.read(
io.StringIO('((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, _validate_otu_ids_and_tree, counts,
otu_ids, t)
# tree missing some branch lengths
t = TreeNode.read(
io.StringIO(
'(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, _validate_otu_ids_and_tree, counts,
otu_ids, t)
# otu_ids not present in tree
t = TreeNode.read(
io.StringIO(
'(((((OTU1:0.25,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU32']
self.assertRaises(MissingNodeError, _validate_otu_ids_and_tree, counts,
otu_ids, t)
# single node tree
t = TreeNode.read(io.StringIO('root;'))
counts = []
otu_ids = []
self.assertRaises(ValueError, _validate_otu_ids_and_tree, counts,
otu_ids, t)
def test_vectorize_counts_and_tree(self):
t = TreeNode.read(io.StringIO("((a:1, b:2)c:3)root;"))
counts = np.array([[0, 1], [1, 5], [10, 1]])
count_array, indexed, branch_lengths = \
_vectorize_counts_and_tree(counts, np.array(['a', 'b']), t)
exp_counts = np.array([[0, 1, 10], [1, 5, 1], [1, 6, 11], [1, 6, 11]])
npt.assert_equal(count_array, exp_counts.T)
if __name__ == "__main__":
main()
| anderspitman/scikit-bio | skbio/diversity/tests/test_util.py | Python | bsd-3-clause | 9,899 | ["scikit-bio"] | 9bc52961e05e30a7011d5ecc4cae1456699ea13324808f6dd3c2988f9f962145 |
#########################################################
#
# DO NOT EDIT THIS FILE. IT IS GENERATED AUTOMATICALLY. #
# PLEASE LOOK INTO THE README FOR MORE INFORMATION. #
#
#########################################################
# coding: utf-8
# # Multi-GPU Training with Caffe2
#
# 
#
# For this tutorial we will explore multi-GPU training. We will show you a basic structure for using the `data_parallel_model` to quickly process a subset of the ImageNet database along the same design as the [ResNet-50 model](https://arxiv.org/abs/1512.03385). We will also get a chance to look under the hood at a few of Caffe2's C++ operators that efficiently handle your image pipeline, build a ResNet model, train on a single GPU and show some optimizations that are included with `data_parallel_model`, and finally we'll scale it up and show you how to parallelize your model so you can run it on multiple GPUs.
#
# ## About the Dataset
#
# A commonly used dataset for benchmarking image recognition technologies is [ImageNet](http://image-net.org/). It is huge. It has images that cover the gamut, and they're categorized by labels so that you can create image subsets of animals, plants, fungi, people, objects, you name it. It's the focus of yearly competitions and this is where deep learning and convolutional neural networks (CNNs) really made their name. During the 2012 ImageNet Large-Scale Visual Recognition Challenge a CNN demonstrated accuracy more than 10% beyond the next competing method. Going from around 75% accuracy to around 85% accuracy when every year the gains were only a percent or two is a significant accomplishment.
#
# 
#
# So let's play with ImageNet and train our own model on a bunch of GPUs! You're going to need a lot of space to host the 14 million images in ImageNet. How much disk space do you have? You should clear up about 300GB of space... on SSD. Spinning discs are so 2000. How much time do you have? With two GPUs maybe we'll be done in just under a week. Ready?
#
# 
#
# That's way too much space and way too long for a tutorial! If you happen to have that much space and 128 GPUs on the latest NVIDIA V100s then you're super awesome and you can replicate our recent results shown below. You might even be able to train ImageNet in under an hour. Given how this performance seems to scale, **maybe YOU can train ImageNet in a minute!** Think about all of the things you could accomplish... a model for millions of hours of video? Catalogue every cat video on YouTube? Look for your doppelganger on Imgur?
#
# Instead of tons of GPUs and the full set of data, we're going to do this cooking show style. We're going to use a small batch of images to train on, and show how you can scale that up. We chose a small slice of ImageNet: a set of 640 cars and 640 boats for our training set. We have 48 cars and 48 boats for our test set. This makes our database of images around 130 MB.
#
# ## ResNet-50 Model Training Overview
#
# Below is an overview of what is needed to train and test this model across multiple GPUs. You see that it is generally not that long, nor is it that complicated. Some of the interactions for creating the parallelized model are handled by custom functions you have to write and we'll go over those later.
#
# 1. use `brew` to create a model for training (we'll create one for testing later)
# 2. create a database reader using the model helper object's `CreateDB` to pull the images
# 3. create functions to run a ResNet-50 model for one or more GPUs
# 4. create the parallelized model
# 5. loop through the number of epochs you want to run, then for each epoch
# * run the train model till you finish each batch of images
# * run the test model
# * calculate times, accuracies, and display the results
#
# ## Part 1: Setup
#
# Your first assignment is to get your training and testing image databases set up. We've created one for you and all you have to do is run the code block below. This assumes you know how to use IPython. When we say run a code block, you can click the block and hit the Play button above or hit Ctrl-Enter on your keyboard. If this is news to you it is advisable that you start with introductory tutorials and get used to IPython and Caffe2 basics first.
#
# The code below will download a small database of boat and car images and their labels for you if it doesn't already exist. The images were pulled from ImageNet and added to an `lmdb` format database. You can download it directly [here](https://download.caffe2.ai/databases/resnet_trainer.zip), unzip it, and change the folder locations to an NFS if that better suits your situation. The tutorial's default location is for you to place it in `~/caffe2_notebooks/tutorial_data/resnet_trainer`.
#
# You can also swap out the database with your own as long as it is in lmdb and you change the `train_data_count` and `test_data_count` variables below. For your first time just use that database we made for you.
#
# We're going to give you all the dependencies needed for the tutorial in the block below.
#
# ### Task: Run the Setup Code
# Read and then run the code block below. Note what modules are being imported and where we're accessing the database. Note and troubleshoot any errors in case something is wrong with your environment. Don't worry about the `nccl` and `gloo` warning messages.
#
# In[ ]:
from caffe2.python import core, workspace, model_helper, net_drawer, memonger, brew
from caffe2.python import data_parallel_model as dpm
from caffe2.python.models import resnet
from caffe2.proto import caffe2_pb2
import numpy as np
import time
import os
from IPython import display
workspace.GlobalInit(['caffe2', '--caffe2_log_level=2'])
# This section checks if you have the training and testing databases
current_folder = os.path.join(os.path.expanduser('~'), 'caffe2_notebooks')
data_folder = os.path.join(current_folder, 'tutorial_data', 'resnet_trainer')
# Train/test data
train_data_db = os.path.join(data_folder, "imagenet_cars_boats_train")
train_data_db_type = "lmdb"
# actually 640 cars and 640 boats = 1280
train_data_count = 1280
test_data_db = os.path.join(data_folder, "imagenet_cars_boats_val")
test_data_db_type = "lmdb"
# actually 48 cars and 48 boats = 96
test_data_count = 96
# Get the dataset if it is missing
def DownloadDataset(url, path):
import requests, zipfile, StringIO
print("Downloading {} ... ".format(url))
r = requests.get(url, stream=True)
z = zipfile.ZipFile(StringIO.StringIO(r.content))
z.extractall(path)
print("Done downloading to {}!".format(path))
# Make the data folder if it doesn't exist
if not os.path.exists(data_folder):
os.makedirs(data_folder)
else:
print("Data folder found at {}".format(data_folder))
# See if you already have the db, and if not, download it
if not os.path.exists(train_data_db):
DownloadDataset("https://download.caffe2.ai/databases/resnet_trainer.zip", data_folder)
# ### Task: Check the Database
#
# Take a look at your data folder. You should find two subfolders, each of which will contain a single `data.mdb` file (or possibly also a lock file):
# 1. imagenet_cars_boats_train (train for training, not locomotives!)
# 2. imagenet_cars_boats_val (val for validation or testing)
#
# ## Part 2: Configure the Training
#
# Below you can tinker with some of the settings for how the model will be created. One obvious setting to try is the `gpus`. By removing one or adding one you're directly impacting the amount of time it will take to run even on this small dataset.
#
# `batch_per_device` is the number of images processed at a time on each GPU. Using the default of 32 for 2 GPUs will equate to 32 images on each GPU for a total of 64 per mini-batch, so we'll go through the whole database and complete an epoch in 20 iterations. This is something you would want to adjust if you're sharing the GPU or otherwise want to adjust how much memory this training run is going to take up. You can see in the lines below that it is set to `32` and that `total_batch_size` is adjusted based on the number of GPUs.
#
# `base_learning_rate` and `weight_decay` will both influence training and can be interesting to change and witness the impact on accuracy or confidence in the results that are shown in the last section of this tutorial.
#
#
# In[ ]:
# Configure how you want to train the model and with how many GPUs
# This is set to use two GPUs in a single machine, but if you have more GPUs, extend the array [0, 1, 2, n]
gpus = [0]
# Batch size of 32 sums up to roughly 5GB of memory per device
batch_per_device = 32
total_batch_size = batch_per_device * len(gpus)
# This model discriminates between two labels: car or boat
num_labels = 2
# Initial learning rate (scale with total batch size)
base_learning_rate = 0.0004 * total_batch_size
# only intends to influence the learning rate after 10 epochs
stepsize = int(10 * train_data_count / total_batch_size)
# Weight decay (L2 regularization)
weight_decay = 1e-4
# ## Part 3:
#
# ### Using Caffe2 Operators to Create a CNN
#
# Caffe2 comes with `ModelHelper` which will do a lot of the heavy lifting for you when setting up a model. Throughout the docs and tutorial this may also be called a `model helper object`. The only required parameter is `name`. It is an arbitrary name for referencing the network in your workspace: you could call it tacos or boatzncarz. For example:
#
# ```python
# taco_model = model_helper.ModelHelper(name="tacos")
# ```
#
# You should also reset your workspace if you run these parts multiple times. Do this just before creating the new model helper object.
#
# ```python
# workspace.ResetWorkspace()
# ```
#
# ### Reading from the Database
#
# Another handy function for feeding your network with images is `CreateDB`, which in this case we need to serve as a database reader for the database we've already created. You can create a reader object like this:
#
# ```python
# reader = taco_model.CreateDB(name, db, db_type)
# ```
#
# ### Task: Create a Model Helper Object
# Remember, we have two databases and each will have their own model, but for now we only need to create the training model for the training db. Use the Work Area below. Also, while you do this, experiment with IPython's development hooks by typing the first part of the name from the imported class or module and hitting the tab key. For example when creating the object you type: `train_model = model_helper.` and after the dot, hit "tab". You should see a full list of available functions. Then when you choose `ModelHelper` hit "(" then hit tab and you should see a full list of params. This is very handy when you're exploring new modules and their functions!
#
# ### Task: Create a Reader
# We also need one reader. We have established the db location, `train_data_db`, and type, `train_data_db_type`, in "Part 1: Setup", so all you have to do is name it and pass in the configs. Again, `name` is arbitrary so you could call it "kindle" if you wanted. Use the Work Area below, and when you are finished run the code block.
# In[ ]:
# LAB WORK AREA FOR PART 3
# Clear workspace to free allocated memory, in case you are running this for a second time.
workspace.ResetWorkspace()
# 1. Create your model helper object for the training model with ModelHelper
# 2. Create your database reader with CreateDB
# ## Part 4: Image Transformations (requires Caffe2 to be compiled with opencv)
#
# Now that we have a reader we should take a look at how we're going to process the images. Since images that are found in the wild can be wildly different sizes, aspect ratios, and orientations we can and should train on as much variety as we can. ImageNet is no exception here. The average resolution is 496x387, and as interesting as that factoid might be, the bottom line is that you have a lot of variation.
#
# As the training images are ingested we would want to conform them to a standard size. The most direct process of doing so could follow a simple ingest where you transform the image to 256x256. We talked about the drawbacks of doing this in [Image Pre-Processing](Image_Pre-Processing_Pipeline.ipynb). Therefore for more accurate results, we should probably rescale, then crop. Even this approach with cropping has the drawback of losing some info from the original photo. What gets chopped off doesn't make it into the training data. If you ran the pre-processing tutorial on the image of the astronauts you will recall that some of the astronauts didn't make the cut. Where'd they go? Wash-out lane? Planet of the Apes? If your model were to detect people, then those lost astronauts would not be getting due credit when you run inference or face detection later using the model.
#
# ### Introducing... the ImageInput Operator
#
# What could be seen as a loss turns into an opportunity. You can crop randomly around the image to create many derivatives of the original image, boosting your training data set, thereby adding robustness to the model. What if the image only has half a car or the front of a boat? You still want your model to be able to detect it! In the image below only the front of a boat is shown and the model shows a 50% confidence in detection.
#
# 
#
# Caffe2 has a solution for this in its [`ImageInput` operator](https://github.com/caffe2/caffe2/blob/master/caffe2/image/image_input_op.h), a C++ image manipulation op that's used under the hood of several of the Caffe2 Python APIs.
#
# Here is a reference implementation:
#
# ```python
# def add_image_input_ops(model):
# # utilize the ImageInput operator to prep the images
# data, label = model.ImageInput(
# reader,
# ["data", "label"],
# batch_size=batch_per_device,
# # mean: to remove color values that are common
# mean=128.,
# # std is going to be modified randomly to influence the mean subtraction
# std=128.,
# # scale to rescale each image to a common size
# scale=256,
# # crop each image to a square of these exact dimensions
# crop=224,
# # not running in test mode
# is_test=False,
# # mirroring of the images will occur randomly
# mirror=1
# )
# # prevent back-propagation: optional performance improvement; may not be observable at small scale
# data = model.StopGradient(data, data)
# ```
#
# * mean: remove info that's common in most images
# * std: used to create a randomization for both cropping and mirroring
# * scale: downres each image so that its shortest side matches this base resolution
# * crop: the image size we want every image to be (using random crops from the scaled down image)
# * mirror: randomly mirror the images so we can train on both representations
#
# The [`StopGradient` operator](https://caffe2.ai/docs/operators-catalogue.html#stopgradient) does no numerical computation. It is used here to prevent back propagation which isn't wanted in this network.
#
# ### Task: Implement the InputImage Operator
# Use the Work Area below to finish the stubbed out function. Refer to the reference implementation for help on this task.
#
# * What happens if you don't add a mean, don't add a std, or don't mirror? How does this change your accuracy when you run it for many epochs?
# * What would happen if we didn't do StopGradient?
# In[ ]:
# LAB WORK AREA FOR PART 4
def add_image_input_ops(model):
raise NotImplementedError # Remove this from the function stub
# ## Part 5: Creating a Residual Network
#
# Now you get the opportunity to use Caffe2's Resnet-50 creation function! During our Setup we `from caffe2.python.models import resnet`. We can use that in the `create_resnet50_model_ops` function that we still need to create; the main part of it will be the `resnet.create_resnet50()` function, as described below:
#
# ```python
# create_resnet50(
# model,
# data,
# num_input_channels,
# num_labels,
# label=None,
# is_test=False,
# no_loss=False,
# no_bias=0,
# conv1_kernel=7,
# conv1_stride=2,
# final_avg_kernel=7
# )
# ```
#
# Below is a reference implementation of the function using `resnet.create_resnet50()`.
#
# ```python
# def create_resnet50_model_ops(model, loss_scale=1.0):
# # Creates a residual network
# [softmax, loss] = resnet.create_resnet50(
# model,
# "data",
# num_input_channels=3,
# num_labels=num_labels,
# label="label",
# )
# prefix = model.net.Proto().name
# loss = model.Scale(loss, prefix + "_loss", scale=loss_scale)
# model.Accuracy([softmax, "label"], prefix + "_accuracy")
# return [loss]
# ```
#
# ### Task: Implement the forward_pass_builder_fun Using Resnet-50
# In the code block below where we stubbed out the `create_resnet50_model_ops` function, utilize `resnet.create_resnet50()` to create a residual network, then return the loss. Refer to the reference implementation for help on this task.
#
# * Bonus points: if you take a look at the resnet class in the Caffe2 docs you'll notice a function to create a 32x32 model. Try it out.
# In[ ]:
# LAB WORK AREA FOR PART 5
def create_resnet50_model_ops(model, loss_scale=1.0):
raise NotImplementedError #remove this from the function stub
# ## Part 6: Make the Network Learn
#
#
# Caffe2 model helper object has several built in functions that will help with this learning by using backpropagation where it will be adjusting weights as it runs through iterations.
#
# * AddWeightDecay
# * Iter
# * net.LearningRate
#
# Below is a reference implementation:
#
# ```python
# def add_parameter_update_ops(model):
# model.AddWeightDecay(weight_decay)
# iter = model.Iter("iter")
# lr = model.net.LearningRate(
# [iter],
# "lr",
# base_lr=base_learning_rate,
# policy="step",
# stepsize=stepsize,
# gamma=0.1,
# )
# # Momentum SGD update
# for param in model.GetParams():
# param_grad = model.param_to_grad[param]
# param_momentum = model.param_init_net.ConstantFill(
# [param], param + '_momentum', value=0.0
# )
#
# # Update param_grad and param_momentum in place
# model.net.MomentumSGDUpdate(
# [param_grad, param_momentum, lr, param],
# [param_grad, param_momentum, param],
# momentum=0.9,
# # Nesterov Momentum works slightly better than standard momentum
# nesterov=1,
# )
# ```
#
# ### Task: Implement the param_update_builder_fun
# Several of our Configuration variables will get used in this step. Take a look at the Configuration section from Part 2 and refresh your memory. We stubbed out the `add_parameter_update_ops` function, so to finish it, utilize `model.AddWeightDecay` and set `weight_decay`. Calculate your stepsize using `int(10 * train_data_count / total_batch_size)` or pull the value from the config. Instantiate the learning iterations with `iter = model.Iter("iter")`. Use `model.net.LearningRate()` to finalize your parameter update operations. You can optionally update your SGD's momentum. It might not make a difference in this small implementation, but if you're gonna go big later, then you'll want to do this.
#
# Refer to the reference implementation for help on this task.
#
# In[ ]:
# LAB WORK AREA FOR PART 6
def add_parameter_update_ops(model):
raise NotImplementedError #remove this from the function stub
# ## Part 7: Gradient Optimization
#
# If you run the network as is you may have issues with memory. Without memory optimization we would have to reduce the batch size, but we shouldn't have to do that. Caffe2 has a `memonger` module for this purpose which will find ways to reuse the gradients that we created. Below is a reference implementation.
#
# ```python
# def optimize_gradient_memory(model, loss):
# model.net._net = memonger.share_grad_blobs(
# model.net,
# loss,
# set(model.param_to_grad.values()),
# # Due to memonger internals, we need a namescope here. Let's make one up; we'll need it later!
# namescope="imonaboat",
# share_activations=False)
# ```
#
# ### Task: Implement memonger
# We're going to use the reference for help here, otherwise it is a little difficult to cover for the scope of this tutorial. The function is ready to go for you, but you should still soak up what's been done in this function. One of the key gotchas here is making sure you give it a namescope so that you can access the gradients you'll be creating in the next step. This name can be anything.
#
# In[ ]:
# LAB WORK AREA FOR PART 7
def optimize_gradient_memory(model, loss):
raise NotImplementedError # Remove this from the function stub
# ## Part 8: Training the Network with One GPU
#
# Now that you've established the basic components to run ResNet-50, you can try it out on one GPU. Now, this could be a lot easier just going straight into the `data_parallel_model` and all of its optimizations, but to help explain the components needed and to build the helper functions to run `Parallelize_GPU`, we may as well start simple!
#
# If you're paying attention you might be wondering about the `gpus` array we made in the config and how that might throw things off. Also, when we looked at the config earlier you may have updated `gpus` to include more than one GPU. That's fine. We can leave it like that for the next part because we will force our script to use just one GPU.
#
# Let's stitch together those functions from Parts 4-7 to run our residual network! Take a look at the code below, so you understand how the pieces fit together.
#
# ```python
# # We need to give the network context and force it to run on the first GPU even if there are more.
# device_opt = core.DeviceOption(caffe2_pb2.CUDA, gpus[0])
# # Here's where that NameScope comes into play
# with core.NameScope("imonaboat"):
# # Picking that one GPU
# with core.DeviceScope(device_opt):
# # Run our reader, and create the layers that transform the images
# add_image_input_ops(train_model)
# # Generate our residual network and return the losses
# losses = create_resnet50_model_ops(train_model)
# # Create gradients for each loss
# blobs_to_gradients = train_model.AddGradientOperators(losses)
# # Kick off the learning and managing of the weights
# add_parameter_update_ops(train_model)
# # Optimize memory usage by consolidating where we can
# optimize_gradient_memory(train_model, [blobs_to_gradients[losses[0]]])
#
# # Startup the network
# workspace.RunNetOnce(train_model.param_init_net)
# # Load all of the initial weights; overwrite lets you run this multiple times
# workspace.CreateNet(train_model.net, overwrite=True)
# ```
#
# ### Task: Pull It All Together & Run It!
#
# Things are getting a little hairy, so we gave you the full reference ready to go. Just run the code block below (hit ctrl-enter). Normally you might not use `overwrite=True` since that could be bad for what you're doing by accidentally erasing your earlier work, so try removing it and running the block multiple times to see what happens. Imagine the case where you have multiple networks going that have the same name. You don't want to overwrite, so you might want to start up a new workspace or modify the names.
# In[ ]:
# LAB WORK AREA FOR PART 8
device_opt = core.DeviceOption(caffe2_pb2.CUDA, gpus[0])
with core.NameScope("imonaboat"):
with core.DeviceScope(device_opt):
add_image_input_ops(train_model)
losses = create_resnet50_model_ops(train_model)
blobs_to_gradients = train_model.AddGradientOperators(losses)
add_parameter_update_ops(train_model)
optimize_gradient_memory(train_model, [blobs_to_gradients[losses[0]]])
workspace.RunNetOnce(train_model.param_init_net)
workspace.CreateNet(train_model.net, overwrite=True)
# ## Part 8 ... part ~~2~~ Deux: Train!
# Here's the fun part where you can tinker with the number of epochs to run and mess with the display. We'll leave this for you to play with as a fait accompli since you worked so hard to get this far!
# In[ ]:
num_epochs = 1
for epoch in range(num_epochs):
# Split up the images evenly: total images / batch size
num_iters = int(train_data_count / total_batch_size)
for iter in range(num_iters):
# Stopwatch start!
t1 = time.time()
# Run this iteration!
workspace.RunNet(train_model.net.Proto().name)
t2 = time.time()
dt = t2 - t1
# Stopwatch stopped! How'd we do?
print((
"Finished iteration {:>" + str(len(str(num_iters))) + "}/{}" +
" (epoch {:>" + str(len(str(num_epochs))) + "}/{})" +
" ({:.2f} images/sec)").
format(iter+1, num_iters, epoch+1, num_epochs, total_batch_size/dt))
# ## Part 9: Getting Parallelized
#
# You get bonus points if you can say "getting parallelized" three times fast without messing up. You just saw some interesting numbers in the last step. Take note of those and see how things scale up when we use more GPUs.
#
# We're going to use Caffe2's `data_parallel_model` and its function called `Parallelize_GPU` to help us accomplish this task. The task is to set up the parallel model, not to say it fast. Here's the spec on `Parallelize_GPU`:
#
# ```python
# Parallelize_GPU(
# model_helper_obj,
# input_builder_fun,
# forward_pass_builder_fun,
# param_update_builder_fun,
# devices=range(0, workspace.NumCudaDevices()),
# rendezvous=None,
# net_type='dag',
# broadcast_computed_params=True,
# optimize_gradient_memory=False)
# ```
#
# We're not ready to just call this function though. As you can see, the second, third, and fourth input parameters expect functions to be passed to them. [More API details here.](https://caffe2.ai/doxygen-python/html/namespacedata__parallel__model.html#a1fe7262a0a66754f19998fa1603317b9) The three functions expected are:
#
# 1. `input_builder_fun`: adds the input operators. Note: Remember to instantiate the reader outside of this function so all GPUs share the same reader object. Signature: input_builder_fun(model)
# 2. `forward_pass_builder_fun`: adds the operators to the model. Must return list of loss-blob references that are used to build the gradient. Loss scale parameter is passed, as you should scale the loss of your model by 1.0 / the total number of gpus. Signature: forward_pass_builder_fun(model, loss_scale)
# 3. `param_update_builder_fun`: adds operators that are run after gradient update, such as updating the weights and weight decaying. Signature: param_update_builder_fun(model)
#
# For the `input_builder_fun` we're going to use the reader we created with `CreateDB` along with a function that leverages Caffe2's `ImageInput` operator. Sound familiar? You already did this in Part 4!
#
# For the `forward_pass_builder_fun` we need to have a residual neural network. You already did this in Part 5!
#
# For the `param_update_builder_fun` we need a function to adjust the weights as the network runs. You already did this in Part 6!
#
# Let's stub out the `Parallelize_GPU` function with the parameters that we're going to use. Recall that in the setup we `from caffe2.python import data_parallel_model as dpm`, so we can use `dpm.Parallelize_GPU()` to access the `Parallelize_GPU` function. First we'll stub out the three other functions that this expects, add the params based on those functions' names and our GPU count, then come back to the lab cell below to populate them with some logic and test them. Below is a reference implementation:
#
# ```python
# dpm.Parallelize_GPU(
# train_model,
# input_builder_fun=add_image_input_ops,
# forward_pass_builder_fun=create_resnet50_model_ops,
# param_update_builder_fun=add_parameter_update_ops,
# devices=gpus,
# optimize_gradient_memory=True,
# )
# ```
#
# ### Task: Make Your Helper Functions
# You already did this in Parts 4 through 6, and in Part 7 you dealt with gradient optimizations that are baked into `Parallelize_GPU`. The three helper function stubs below can be eliminated, or, if you want to see everything together, go ahead and copy the functions there so you can run them from the work area block below.
#
# ### Task: Parallelize!
# Now you can stub out a call to `Parallelize_GPU`. Use the reference implementation above if you get stuck.
# * `model_helper_object`: created in Part 3; maybe you called it taco_model, or if you weren't copying and pasting you thoughtfully called it train_model or training_model.
# * Now pass the function name for each of the three functions you just created, e.g. `input_builder_fun=add_image_input_ops`
# * `devices`: we can pass in our `gpus` array from our earlier Setup.
# * `optimize_gradient_memory`: the default is `False` but let's set it to `True`; this takes care of what we had to do in Step 7 with `memonger`.
# * other params: ignore/don't pass anything to accept their defaults
#
# In[ ]:
# LAB WORK AREA for Part 9
# Reinitializing our configuration variables to accommodate 2 (or more, if you have them) GPUs.
gpus = [0, 1]
# Batch size of 32 sums up to roughly 5GB of memory per device
batch_per_device = 32
total_batch_size = batch_per_device * len(gpus)
# This model discriminates between two labels: car or boat
num_labels = 2
# Initial learning rate (scale with total batch size)
base_learning_rate = 0.0004 * total_batch_size
# only intends to influence the learning rate after 10 epochs
stepsize = int(10 * train_data_count / total_batch_size)
# Weight decay (L2 regularization)
weight_decay = 1e-4
# Clear workspace to free network and memory allocated in previous steps.
workspace.ResetWorkspace()
# Create input_builder_fun
def add_image_input_ops(model):
# This will utilize the reader to pull images and feed them to the training model's helper object
# Use the model.ImageInput operator to load data from reader & apply transformations to the images.
raise NotImplementedError # Remove this from the function stub
# Create forward_pass_builder_fun
def create_resnet50_model_ops(model, loss_scale):
# Use resnet module to create a residual net
raise NotImplementedError # Remove this from the function stub
# Create param_update_builder_fun
def add_parameter_update_ops(model):
raise NotImplementedError # Remove this from the function stub
# Create new train model
train_model = NotImplementedError
# Create new reader
reader = NotImplementedError
# Create parallelized model using dpm.Parallelize_GPU
# Use workspace.RunNetOnce and workspace.CreateNet to fire up the train network
workspace.RunNetOnce(train_model.param_init_net)
workspace.CreateNet(train_model.net, overwrite=True)
# ## Part 10: Create a Test Model
#
# After every epoch of training, we like to run some validation data through our model to see how it performs.
#
# Like training, this is another net, with its own data reader. Unlike training, this net does not perform backpropagation. It only does a forward pass and compares the output of the network with the label of the validation data.
#
# You've already done these steps once before when you created the training network, so do it again, but name it something different, like "test".
#
# ### Task: Create a Test Model
#
# * Use `ModelHelper` to create a model helper object called "test"
# * Use `CreateDB` to create a reader and call it "test_reader"
# * Use `Parallelize_GPU` to parallelize the model, but set `param_update_builder_fun=None` to skip backpropagation
# * Use `workspace.RunNetOnce` and `workspace.CreateNet` to fire up the test network
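#
# Below is a minimal sketch of one possible way to wire this up (not the only solution). It assumes the
# helper functions, db settings, and `gpus` list from the earlier parts are still defined; the names
# `test_model` and `test_reader` are only suggestions. Note that `add_image_input_ops` reads from the
# module-level `reader`, so point that at the test db before parallelizing.
#
# ```python
# # Share parameters with the training model instead of re-initializing them
# test_model = model_helper.ModelHelper(name="test", init_params=False)
# # Reader for the validation db; the input function picks this up via the module-level `reader`
# reader = test_model.CreateDB(
#     "test_reader",
#     db=test_data_db,
#     db_type=test_data_db_type,
# )
# dpm.Parallelize_GPU(
#     test_model,
#     input_builder_fun=add_image_input_ops,
#     forward_pass_builder_fun=create_resnet50_model_ops,
#     # No parameter updates: the test net only runs a forward pass
#     param_update_builder_fun=None,
#     devices=gpus,
# )
# workspace.RunNetOnce(test_model.param_init_net)
# workspace.CreateNet(test_model.net, overwrite=True)
# ```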
# In[ ]:
# LAB WORK AREA for Part 10
# Create your test model with ModelHelper
# Create your reader with CreateDB
# Use multi-GPU with Parallelize_GPU, but don't utilize backpropagation
# Use workspace.RunNetOnce and workspace.CreateNet to fire up the test network
workspace.RunNetOnce(test_model.param_init_net)
workspace.CreateNet(test_model.net, overwrite=True)
# ## Get Ready to Display the Results
# At the end of every epoch we will take a look at how the network performs visually. We will also report on the accuracy of the training model and the test model. Let's not force you to write your own reporting and display code, so just run the code block below to get those features ready.
# In[ ]:
from caffe2.python import visualize
from matplotlib import pyplot as plt
def display_images_and_confidence():
images = []
confidences = []
n = 16
data = workspace.FetchBlob("gpu_0/data")
label = workspace.FetchBlob("gpu_0/label")
softmax = workspace.FetchBlob("gpu_0/softmax")
for arr in zip(data[0:n], label[0:n], softmax[0:n]):
# CHW to HWC, normalize to [0.0, 1.0], and BGR to RGB
bgr = (arr[0].swapaxes(0, 1).swapaxes(1, 2) + 1.0) / 2.0
rgb = bgr[...,::-1]
images.append(rgb)
confidences.append(arr[2][arr[1]])
# Create grid for images
fig, rows = plt.subplots(nrows=4, ncols=4, figsize=(12, 12))
plt.tight_layout(h_pad=2)
# Display images and the model's confidence in their label
items = zip([ax for cols in rows for ax in cols], images, confidences)
for (ax, image, confidence) in items:
ax.imshow(image)
if confidence >= 0.5:
ax.set_title("RIGHT ({:.1f}%)".format(confidence * 100.0), color='green')
else:
ax.set_title("WRONG ({:.1f}%)".format(confidence * 100.0), color='red')
plt.show()
def accuracy(model):
accuracy = []
prefix = model.net.Proto().name
for device in model._devices:
accuracy.append(
np.asscalar(workspace.FetchBlob("gpu_{}/{}_accuracy".format(device, prefix))))
return np.average(accuracy)
# ## Part 11: Run Multi-GPU Training and Get Test Results
# You've come a long way. Now is the time to see it all pay off. Since you already ran ResNet once, you can glance at the code below and run it. The big difference this time is your model is parallelized!
#
# The additional components at the end deal with accuracy so you may want to dig into those specifics as a bonus task. You can try it again: just adjust the `num_epochs` value below, run the block, and see the results. You can also go back to Part 10 to reinitialize the model, and run this step again. (You may want to add `workspace.ResetWorkspace()` before you run the new models again.)
#
# Go back and check the images/sec from when you ran single GPU. Note how you can scale up with a small amount of overhead.
#
# ### Task: How many GPUs would it take to train ImageNet in under a minute?
# In[ ]:
# Start looping through epochs where we run the batches of images to cover the entire dataset
# Usually you would want to run a lot more epochs to increase your model's accuracy
num_epochs = 2
for epoch in range(num_epochs):
# Split up the images evenly: total images / batch size
num_iters = int(train_data_count / total_batch_size)
for iter in range(num_iters):
# Stopwatch start!
t1 = time.time()
# Run this iteration!
workspace.RunNet(train_model.net.Proto().name)
t2 = time.time()
dt = t2 - t1
# Stopwatch stopped! How'd we do?
print((
"Finished iteration {:>" + str(len(str(num_iters))) + "}/{}" +
" (epoch {:>" + str(len(str(num_epochs))) + "}/{})" +
" ({:.2f} images/sec)").
format(iter+1, num_iters, epoch+1, num_epochs, total_batch_size/dt))
# Get the average accuracy for the training model
train_accuracy = accuracy(train_model)
# Run the test model and assess accuracy
test_accuracies = []
for _ in range(int(test_data_count / total_batch_size)):
# Run the test model
workspace.RunNet(test_model.net.Proto().name)
test_accuracies.append(accuracy(test_model))
test_accuracy = np.average(test_accuracies)
print(
"Train accuracy: {:.3f}, test accuracy: {:.3f}".
format(train_accuracy, test_accuracy))
# Output images with confidence scores as the caption
display_images_and_confidence()
# If you enjoyed this tutorial and would like to see it in action in a different way, check Caffe2's Python examples to try a [script version](https://github.com/caffe2/caffe2/blob/master/caffe2/python/examples/resnet50_trainer.py) of this multi-GPU trainer. We also have some more info below in the Appendix and a Solutions section that you can use to run the expected output of this tutorial.
# ## Appendix
# Here are a few things you may want to play with.
#
# ### Explore the workspace and the protobuf outputs
# In[ ]:
print(str(train_model.param_init_net.Proto())[:1000] + '\n...')
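# A couple more quick pokes at the workspace (a sketch; the "gpu_0" blob names assume the namescope
# that data_parallel_model used above):
print(workspace.Blobs()[:10])                   # first few blob names currently in the workspace
print(workspace.FetchBlob("gpu_0/data").shape)  # shape of the last image batch fetched on GPU 0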
# ## Solutions
# This section below contains working examples for your reference. You should be able to execute these cells in order and see the expected output. **Note: this assumes you have at least 2 GPUs**
# In[ ]:
# SOLUTION for Part 1
from caffe2.python import core, workspace, model_helper, net_drawer, memonger, brew
from caffe2.python import data_parallel_model as dpm
from caffe2.python.models import resnet
from caffe2.proto import caffe2_pb2
import numpy as np
import time
import os
from IPython import display
workspace.GlobalInit(['caffe2', '--caffe2_log_level=2'])
# This section checks if you have the training and testing databases
current_folder = os.path.join(os.path.expanduser('~'), 'caffe2_notebooks')
data_folder = os.path.join(current_folder, 'tutorial_data', 'resnet_trainer')
# Train/test data
train_data_db = os.path.join(data_folder, "imagenet_cars_boats_train")
train_data_db_type = "lmdb"
# actually 640 cars and 640 boats = 1280
train_data_count = 1280
test_data_db = os.path.join(data_folder, "imagenet_cars_boats_val")
test_data_db_type = "lmdb"
# actually 48 cars and 48 boats = 96
test_data_count = 96
# Get the dataset if it is missing
def DownloadDataset(url, path):
import requests, zipfile, StringIO
print("Downloading {} ... ".format(url))
r = requests.get(url, stream=True)
z = zipfile.ZipFile(StringIO.StringIO(r.content))
z.extractall(path)
print("Done downloading to {}!".format(path))
# Make the data folder if it doesn't exist
if not os.path.exists(data_folder):
os.makedirs(data_folder)
else:
print("Data folder found at {}".format(data_folder))
# See if you already have the db, and if not, download it
if not os.path.exists(train_data_db):
DownloadDataset("https://download.caffe2.ai/databases/resnet_trainer.zip", data_folder)
# In[ ]:
# PART 1 TROUBLESHOOTING
# lmdb error or unable to open database: look in the database folder from terminal and (sudo) delete the lock file and try again
# In[ ]:
# SOLUTION for Part 2
# Configure how you want to train the model and with how many GPUs
# This is set to use two GPUs in a single machine, but if you have more GPUs, extend the array [0, 1, 2, n]
gpus = [0, 1]
# Batch size of 32 sums up to roughly 5GB of memory per device
batch_per_device = 32
total_batch_size = batch_per_device * len(gpus)
# This model discriminates between two labels: car or boat
num_labels = 2
# Initial learning rate (scale with total batch size)
base_learning_rate = 0.0004 * total_batch_size
# stepsize: the learning rate is stepped down once every ~10 epochs' worth of iterations
stepsize = int(10 * train_data_count / total_batch_size)
# Weight decay (L2 regularization)
weight_decay = 1e-4
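# Worked numbers for the defaults above (illustration only): with 2 GPUs and
# 32 images per device, total_batch_size = 64, so
#   base_learning_rate = 0.0004 * 64 = 0.0256
#   stepsize = int(10 * 1280 / 64) = 200 iterations, i.e. one LR step per ~10 epochs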
# In[ ]:
# SOLUTION for Part 3
workspace.ResetWorkspace()
# 1. Use the model helper to create a CNN for us
train_model = model_helper.ModelHelper(
# Arbitrary name for referencing the network in your workspace: you could call it tacos or boatzncarz
name="train",
)
# 2. Create a database reader
# This training data reader is shared between all GPUs.
# When reading data, the trainer runs ImageInputOp for each GPU to retrieve their own unique batch of training data.
# CreateDB is inherited by ModelHelper from model_helper.py
# We are going to name it "train_reader" and pass in the db configurations we set earlier
reader = train_model.CreateDB(
"train_reader",
db=train_data_db,
db_type=train_data_db_type,
)
# In[ ]:
# SOLUTION for Part 4
def add_image_input_ops(model):
# utilize the ImageInput operator to prep the images
data, label = brew.image_input(
model,
reader,
["data", "label"],
batch_size=batch_per_device,
# mean: to remove color values that are common
mean=128.,
        # std: divide the mean-subtracted values by this, scaling pixels to roughly [-1, 1]
std=128.,
# scale to rescale each image to a common size
scale=256,
        # crop: square-crop each image to these exact dimensions
crop=224,
# not running in test mode
is_test=False,
# mirroring of the images will occur randomly
mirror=1
)
# prevent back-propagation: optional performance improvement; may not be observable at small scale
data = model.net.StopGradient(data, data)
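# Optional check (an addition to the solution): once a net built with this input
# function has run, the per-device data blob is expected to be NCHW-shaped
# (batch_per_device, 3, 224, 224) given crop=224 above, e.g.
#   print(workspace.FetchBlob("gpu_0/data").shape)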
# In[ ]:
# SOLUTION for Part 5
def create_resnet50_model_ops(model, loss_scale=1.0):
# Creates a residual network
[softmax, loss] = resnet.create_resnet50(
model,
"data",
num_input_channels=3,
num_labels=num_labels,
label="label",
)
prefix = model.net.Proto().name
loss = model.net.Scale(loss, prefix + "_loss", scale=loss_scale)
brew.accuracy(model, [softmax, "label"], prefix + "_accuracy")
return [loss]
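# Note (an assumption about data_parallel_model, not stated in the tutorial):
# when this builder is invoked through dpm.Parallelize_GPU, loss_scale is
# typically 1.0 / len(gpus), so the Scale op above averages the loss across
# devices (0.5 per device with two GPUs) rather than summing it.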
# In[ ]:
# SOLUTION for Part 6
def add_parameter_update_ops(model):
brew.add_weight_decay(model, weight_decay)
iter = brew.iter(model, "iter")
lr = model.net.LearningRate(
[iter],
"lr",
base_lr=base_learning_rate,
policy="step",
stepsize=stepsize,
gamma=0.1,
)
for param in model.GetParams():
param_grad = model.param_to_grad[param]
param_momentum = model.param_init_net.ConstantFill(
[param], param + '_momentum', value=0.0
)
# Update param_grad and param_momentum in place
model.net.MomentumSGDUpdate(
[param_grad, param_momentum, lr, param],
[param_grad, param_momentum, param],
            # momentum of 0.9 is a standard choice: close to 1.0 but with room to grow
momentum=0.9,
            # Nesterov is a defenseman for the Montreal Canadiens, but
# Nesterov Momentum works slightly better than standard momentum
nesterov=1,
)
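# Worked example of the "step" policy above (illustration only): the effective
# learning rate at iteration t is base_lr * gamma ** (t // stepsize), so with
# the defaults (base_lr 0.0256, stepsize 200, gamma 0.1) it stays at 0.0256 for
# the first 200 iterations and drops to 0.00256 afterwards.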
# In[ ]:
# SOLUTION for Part 7
def optimize_gradient_memory(model, loss):
model.net._net = memonger.share_grad_blobs(
model.net,
loss,
set(model.param_to_grad.values()),
namescope="imonaboat",
share_activations=False,
)
# In[ ]:
# SOLUTION for Part 8
device_opt = core.DeviceOption(caffe2_pb2.CUDA, gpus[0])
with core.NameScope("imonaboat"):
with core.DeviceScope(device_opt):
add_image_input_ops(train_model)
losses = create_resnet50_model_ops(train_model)
blobs_to_gradients = train_model.AddGradientOperators(losses)
add_parameter_update_ops(train_model)
optimize_gradient_memory(train_model, [blobs_to_gradients[losses[0]]])
workspace.RunNetOnce(train_model.param_init_net)
workspace.CreateNet(train_model.net, overwrite=True)
# In[ ]:
# SOLUTION for Part 8 Part Deux
num_epochs = 1
for epoch in range(num_epochs):
# Split up the images evenly: total images / batch size
num_iters = int(train_data_count / batch_per_device)
for iter in range(num_iters):
# Stopwatch start!
t1 = time.time()
# Run this iteration!
workspace.RunNet(train_model.net.Proto().name)
t2 = time.time()
dt = t2 - t1
# Stopwatch stopped! How'd we do?
print((
"Finished iteration {:>" + str(len(str(num_iters))) + "}/{}" +
" (epoch {:>" + str(len(str(num_epochs))) + "}/{})" +
" ({:.2f} images/sec)").
format(iter+1, num_iters, epoch+1, num_epochs, batch_per_device/dt))
# In[ ]:
# SOLUTION for Part 9 Prep
# Reinitializing our configuration variables to accommodate 2 (or more, if you have them) GPUs.
gpus = [0, 1]
# Batch size of 32 sums up to roughly 5GB of memory per device
batch_per_device = 32
total_batch_size = batch_per_device * len(gpus)
# This model discriminates between two labels: car or boat
num_labels = 2
# Initial learning rate (scale with total batch size)
base_learning_rate = 0.0004 * total_batch_size
# stepsize: the learning rate is stepped down once every ~10 epochs' worth of iterations
stepsize = int(10 * train_data_count / total_batch_size)
# Weight decay (L2 regularization)
weight_decay = 1e-4
# Reset workspace to clear out memory allocated during our first run.
workspace.ResetWorkspace()
# 1. Use the model helper to create a CNN for us
train_model = model_helper.ModelHelper(
# Arbitrary name for referencing the network in your workspace: you could call it tacos or boatzncarz
name="train",
)
# 2. Create a database reader
# This training data reader is shared between all GPUs.
# When reading data, the trainer runs ImageInputOp for each GPU to retrieve their own unique batch of training data.
# CreateDB is inherited by ModelHelper from model_helper.py
# We are going to name it "train_reader" and pass in the db configurations we set earlier
reader = train_model.CreateDB(
"train_reader",
db=train_data_db,
db_type=train_data_db_type,
)
# In[ ]:
# SOLUTION for Part 9
# assumes you're using the functions created in Part 4, 5, 6
dpm.Parallelize_GPU(
train_model,
input_builder_fun=add_image_input_ops,
forward_pass_builder_fun=create_resnet50_model_ops,
param_update_builder_fun=add_parameter_update_ops,
devices=gpus,
optimize_gradient_memory=True,
)
workspace.RunNetOnce(train_model.param_init_net)
workspace.CreateNet(train_model.net)
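# Optional peek (an addition, not part of the solution): Parallelize_GPU keeps
# each device's copy of the blobs under a "gpu_<id>/" name scope, which is why
# later cells fetch "gpu_0/data" and "gpu_0/softmax". A quick way to see it:
print([b for b in workspace.Blobs() if str(b).startswith("gpu_0/")][:10])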
# In[ ]:
# SOLUTION for Part 10
test_model = model_helper.ModelHelper(
name="test",
)
reader = test_model.CreateDB(
"test_reader",
db=test_data_db,
db_type=test_data_db_type,
)
# Validation is parallelized across devices as well
dpm.Parallelize_GPU(
test_model,
input_builder_fun=add_image_input_ops,
forward_pass_builder_fun=create_resnet50_model_ops,
param_update_builder_fun=None,
devices=gpus,
)
workspace.RunNetOnce(test_model.param_init_net)
workspace.CreateNet(test_model.net)
# In[ ]:
# SOLUTION for Part 10 - display reporting setup
from caffe2.python import visualize
from matplotlib import pyplot as plt
def display_images_and_confidence():
images = []
confidences = []
n = 16
data = workspace.FetchBlob("gpu_0/data")
label = workspace.FetchBlob("gpu_0/label")
softmax = workspace.FetchBlob("gpu_0/softmax")
for arr in zip(data[0:n], label[0:n], softmax[0:n]):
# CHW to HWC, normalize to [0.0, 1.0], and BGR to RGB
bgr = (arr[0].swapaxes(0, 1).swapaxes(1, 2) + 1.0) / 2.0
rgb = bgr[...,::-1]
images.append(rgb)
confidences.append(arr[2][arr[1]])
# Create grid for images
fig, rows = plt.subplots(nrows=4, ncols=4, figsize=(12, 12))
plt.tight_layout(h_pad=2)
    # Display images and the model's confidence in their label
items = zip([ax for cols in rows for ax in cols], images, confidences)
for (ax, image, confidence) in items:
ax.imshow(image)
if confidence >= 0.5:
ax.set_title("RIGHT ({:.1f}%)".format(confidence * 100.0), color='green')
else:
ax.set_title("WRONG ({:.1f}%)".format(confidence * 100.0), color='red')
plt.show()
def accuracy(model):
accuracy = []
prefix = model.net.Proto().name
for device in model._devices:
accuracy.append(
np.asscalar(workspace.FetchBlob("gpu_{}/{}_accuracy".format(device, prefix))))
return np.average(accuracy)
# In[ ]:
# SOLUTION for Part 11
# Start looping through epochs where we run the batches of images to cover the entire dataset
# Usually you would want to run a lot more epochs to increase your model's accuracy
num_epochs = 2
for epoch in range(num_epochs):
# Split up the images evenly: total images / batch size
num_iters = int(train_data_count / total_batch_size)
for iter in range(num_iters):
# Stopwatch start!
t1 = time.time()
# Run this iteration!
workspace.RunNet(train_model.net.Proto().name)
t2 = time.time()
dt = t2 - t1
# Stopwatch stopped! How'd we do?
print((
"Finished iteration {:>" + str(len(str(num_iters))) + "}/{}" +
" (epoch {:>" + str(len(str(num_epochs))) + "}/{})" +
" ({:.2f} images/sec)").
format(iter+1, num_iters, epoch+1, num_epochs, total_batch_size/dt))
# Get the average accuracy for the training model
train_accuracy = accuracy(train_model)
# Run the test model and assess accuracy
test_accuracies = []
    for _ in range(int(test_data_count / total_batch_size)):
# Run the test model
workspace.RunNet(test_model.net.Proto().name)
test_accuracies.append(accuracy(test_model))
test_accuracy = np.average(test_accuracies)
print(
"Train accuracy: {:.3f}, test accuracy: {:.3f}".
format(train_accuracy, test_accuracy))
# Output images with confidence scores as the caption
display_images_and_confidence()
# ### TO DO:
# (or things to explore on your own to improve this tutorial!)
# * Create your own database of images
# * Explore the layers
# * Print out images of the intermediates/activations to show what's happening under the hood
# * Make some interactions between epochs (change of params to show impact)
|
Yangqing/caffe2
|
caffe2/python/tutorials/py_gen/Multi-GPU_Training.py
|
Python
|
apache-2.0
| 50,348
|
[
"TINKER"
] |
e26c790b3b1dc301b004072d3cbaca471007dc185d8db173e2686753b045d5ef
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
import warnings
import numpy as np
from matplotlib.testing.decorators import image_comparison, knownfailureif
from matplotlib.cbook import MatplotlibDeprecationWarning
with warnings.catch_warnings():
# the module is deprecated. The tests should be removed when the module is.
warnings.simplefilter('ignore', MatplotlibDeprecationWarning)
from matplotlib.delaunay.triangulate import Triangulation
from matplotlib import pyplot as plt
import matplotlib as mpl
def constant(x, y):
return np.ones(x.shape, x.dtype)
constant.title = 'Constant'
def xramp(x, y):
return x
xramp.title = 'X Ramp'
def yramp(x, y):
return y
yramp.title = 'Y Ramp'
def exponential(x, y):
x = x*9
y = y*9
x1 = x+1.0
x2 = x-2.0
x4 = x-4.0
x7 = x-7.0
y1 = x+1.0
y2 = y-2.0
y3 = y-3.0
y7 = y-7.0
f = (0.75 * np.exp(-(x2*x2+y2*y2)/4.0) +
0.75 * np.exp(-x1*x1/49.0 - y1/10.0) +
0.5 * np.exp(-(x7*x7 + y3*y3)/4.0) -
0.2 * np.exp(-x4*x4 -y7*y7))
return f
exponential.title = 'Exponential and Some Gaussians'
def cliff(x, y):
f = np.tanh(9.0*(y-x) + 1.0)/9.0
return f
cliff.title = 'Cliff'
def saddle(x, y):
f = (1.25 + np.cos(5.4*y))/(6.0 + 6.0*(3*x-1.0)**2)
return f
saddle.title = 'Saddle'
def gentle(x, y):
f = np.exp(-5.0625*((x-0.5)**2+(y-0.5)**2))/3.0
return f
gentle.title = 'Gentle Peak'
def steep(x, y):
f = np.exp(-20.25*((x-0.5)**2+(y-0.5)**2))/3.0
return f
steep.title = 'Steep Peak'
def sphere(x, y):
circle = 64-81*((x-0.5)**2 + (y-0.5)**2)
f = np.where(circle >= 0, np.sqrt(np.clip(circle,0,100)) - 0.5, 0.0)
return f
sphere.title = 'Sphere'
def trig(x, y):
f = 2.0*np.cos(10.0*x)*np.sin(10.0*y) + np.sin(10.0*x*y)
return f
trig.title = 'Cosines and Sines'
def gauss(x, y):
x = 5.0-10.0*x
y = 5.0-10.0*y
g1 = np.exp(-x*x/2)
g2 = np.exp(-y*y/2)
f = g1 + 0.75*g2*(1 + g1)
return f
gauss.title = 'Gaussian Peak and Gaussian Ridges'
def cloverleaf(x, y):
ex = np.exp((10.0-20.0*x)/3.0)
ey = np.exp((10.0-20.0*y)/3.0)
logitx = 1.0/(1.0+ex)
logity = 1.0/(1.0+ey)
f = (((20.0/3.0)**3 * ex*ey)**2 * (logitx*logity)**5 *
(ex-2.0*logitx)*(ey-2.0*logity))
return f
cloverleaf.title = 'Cloverleaf'
def cosine_peak(x, y):
circle = np.hypot(80*x-40.0, 90*y-45.)
f = np.exp(-0.04*circle) * np.cos(0.15*circle)
return f
cosine_peak.title = 'Cosine Peak'
allfuncs = [exponential, cliff, saddle, gentle, steep, sphere, trig, gauss, cloverleaf, cosine_peak]
class LinearTester(object):
name = 'Linear'
def __init__(self, xrange=(0.0, 1.0), yrange=(0.0, 1.0), nrange=101, npoints=250):
self.xrange = xrange
self.yrange = yrange
self.nrange = nrange
self.npoints = npoints
rng = np.random.RandomState(1234567890)
self.x = rng.uniform(xrange[0], xrange[1], size=npoints)
self.y = rng.uniform(yrange[0], yrange[1], size=npoints)
self.tri = Triangulation(self.x, self.y)
def replace_data(self, dataset):
self.x = dataset.x
self.y = dataset.y
self.tri = Triangulation(self.x, self.y)
def interpolator(self, func):
z = func(self.x, self.y)
return self.tri.linear_extrapolator(z, bbox=self.xrange+self.yrange)
def plot(self, func, interp=True, plotter='imshow'):
if interp:
lpi = self.interpolator(func)
z = lpi[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
else:
y, x = np.mgrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
z = func(x, y)
z = np.where(np.isinf(z), 0.0, z)
extent = (self.xrange[0], self.xrange[1],
self.yrange[0], self.yrange[1])
fig = plt.figure()
plt.hot() # Some like it hot
if plotter == 'imshow':
plt.imshow(np.nan_to_num(z), interpolation='nearest', extent=extent, origin='lower')
elif plotter == 'contour':
Y, X = np.ogrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
plt.contour(np.ravel(X), np.ravel(Y), z, 20)
x = self.x
y = self.y
lc = mpl.collections.LineCollection(np.array([((x[i], y[i]), (x[j], y[j]))
for i, j in self.tri.edge_db]), colors=[(0,0,0,0.2)])
ax = plt.gca()
ax.add_collection(lc)
if interp:
title = '%s Interpolant' % self.name
else:
title = 'Reference'
if hasattr(func, 'title'):
plt.title('%s: %s' % (func.title, title))
else:
plt.title(title)
class NNTester(LinearTester):
name = 'Natural Neighbors'
def interpolator(self, func):
z = func(self.x, self.y)
return self.tri.nn_extrapolator(z, bbox=self.xrange+self.yrange)
def make_all_2d_testfuncs(allfuncs=allfuncs):
def make_test(func):
filenames = [
'%s-%s' % (func.__name__, x) for x in
['ref-img', 'nn-img', 'lin-img', 'ref-con', 'nn-con', 'lin-con']]
# We only generate PNGs to save disk space -- we just assume
# that any backend differences are caught by other tests.
@image_comparison(filenames, extensions=['png'],
freetype_version=('2.4.5', '2.4.9'),
remove_text=True)
def reference_test():
nnt.plot(func, interp=False, plotter='imshow')
nnt.plot(func, interp=True, plotter='imshow')
lpt.plot(func, interp=True, plotter='imshow')
nnt.plot(func, interp=False, plotter='contour')
nnt.plot(func, interp=True, plotter='contour')
lpt.plot(func, interp=True, plotter='contour')
tester = reference_test
tester.__name__ = str('test_%s' % func.__name__)
return tester
nnt = NNTester(npoints=1000)
lpt = LinearTester(npoints=1000)
for func in allfuncs:
globals()['test_%s' % func.__name__] = make_test(func)
make_all_2d_testfuncs()
# 1d and 0d grid tests
ref_interpolator = Triangulation([0,10,10,0],
[0,0,10,10]).linear_interpolator([1,10,5,2.0])
def test_1d_grid():
res = ref_interpolator[3:6:2j,1:1:1j]
assert np.allclose(res, [[1.6],[1.9]], rtol=0)
def test_0d_grid():
res = ref_interpolator[3:3:1j,1:1:1j]
assert np.allclose(res, [[1.6]], rtol=0)
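# Worked arithmetic behind the expected values above (a sketch, assuming the
# square is triangulated along its (0,0)-(10,10) diagonal): both sample points
# fall in the triangle with corners (0,0), (10,10), (0,10), where z = 1, 5, 2,
# i.e. on the plane f(x, y) = 1 + 0.3*x + 0.1*y. The grid indexing is
# [y-range, x-range], so the samples are (x=1, y=3) -> 1.6 and (x=1, y=6) -> 1.9.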
@image_comparison(baseline_images=['delaunay-1d-interp'], extensions=['png'])
def test_1d_plots():
x_range = slice(0.25,9.75,20j)
x = np.mgrid[x_range]
ax = plt.gca()
for y in xrange(2,10,2):
plt.plot(x, ref_interpolator[x_range,y:y:1j])
ax.set_xticks([])
ax.set_yticks([])
|
yuanagain/seniorthesis
|
venv/lib/python2.7/site-packages/matplotlib/tests/test_delaunay.py
|
Python
|
mit
| 7,137
|
[
"Gaussian"
] |
2b3728890a640e1b5504c33808286744e72f9af790b5b26fea4a4c1ca5cef85f
|
"""
Module containing wrappers to create, load, simulate, analyze networks
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
#------------------------------------------------------------------------------
# Wrapper to create network
#------------------------------------------------------------------------------
def create(netParams=None, simConfig=None, output=False):
"""
Wrapper function to create a simulation
Parameters
----------
netParams : ``netParams object``
NetPyNE netParams object specifying network parameters.
**Default:** *required*.
simConfig : ``simConfig object``
NetPyNE simConfig object specifying simulation configuration.
**Default:** *required*.
output : bool
Whether or not to return output from the simulation.
**Default:** ``False`` does not return anything.
**Options:** ``True`` returns output.
Returns
-------
data : tuple
If ``output`` is ``True``, returns ``(pops, cells, conns, rxd, stims, simData)``
"""
from .. import sim
import __main__ as top
if not netParams: netParams = top.netParams
if not simConfig: simConfig = top.simConfig
sim.initialize(netParams, simConfig) # create network object and set cfg and net params
pops = sim.net.createPops() # instantiate network populations
cells = sim.net.createCells() # instantiate network cells based on defined populations
conns = sim.net.connectCells() # create connections between cells based on params
stims = sim.net.addStims() # add external stimulation to cells (IClamps etc)
rxd = sim.net.addRxD() # add reaction-diffusion (RxD)
simData = sim.setupRecording() # setup variables to record for each cell (spikes, V traces, etc)
if output:
return (pops, cells, conns, rxd, stims, simData)
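# Illustrative usage (a sketch, not part of the module; netParams and simConfig
# are assumed to be defined in the calling script):
#
#   from netpyne import sim
#   pops, cells, conns, rxd, stims, simData = sim.create(netParams, simConfig, output=True)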
#------------------------------------------------------------------------------
# Wrapper to simulate network
#------------------------------------------------------------------------------
def simulate():
"""
Wrapper function to run a simulation and gather the data
"""
from .. import sim
sim.runSim()
sim.gatherData() # gather spiking data and cell info from each node
#------------------------------------------------------------------------------
# Wrapper to simulate network at intervals, saving data at each interval
#------------------------------------------------------------------------------
def intervalSimulate(interval):
"""
Wrapper function to run a simulation at intervals and gather the data from files
Parameters
----------
interval : number
The time interval at which to save data files.
**Default:** *required*.
"""
from .. import sim
sim.runSimWithIntervalFunc(interval, sim.intervalSave) # run parallel Neuron simulation
sim.fileGather() # gather spiking data and cell info from saved file
#------------------------------------------------------------------------------
# Wrapper to simulate network, saving and gathering data by node (distributed)
#------------------------------------------------------------------------------
def distributedSimulate(filename=None, dataDir=None, includeLFP=True):
"""
Wrapper function to run a simulation and save/load data to/from files by node
Parameters
----------
filename : str
name of saved data files.
**Default:** ``None`` uses the name of the simulation.
dataDir : str
name of directory to save data to and load data from.
**Default:** ``None`` uses the simulation name.
includeLFP : bool
whether or not to include LFP data
**Default:** ``True`` includes LFP data if available.
"""
from .. import sim
sim.runSim()
sim.saveDataInNodes(filename=filename, saveLFP=includeLFP, removeTraces=False, dataDir=dataDir)
sim.gatherDataFromFiles(gatherLFP=includeLFP, dataDir=dataDir)
#------------------------------------------------------------------------------
# Wrapper to analyze network
#------------------------------------------------------------------------------
def analyze():
"""
Wrapper function to analyze and plot simulation data
"""
from .. import sim
    sim.saveData() # save network and simulation data to file
    sim.analysis.plotData() # plot recorded data and analysis figures
#------------------------------------------------------------------------------
# Wrapper to create and simulate network
#------------------------------------------------------------------------------
def createSimulate(netParams=None, simConfig=None, output=False):
"""
Wrapper function to create and run a simulation
Parameters
----------
netParams : ``netParams object``
NetPyNE netParams object specifying network parameters.
**Default:** *required*.
simConfig : ``simConfig object``
NetPyNE simConfig object specifying simulation configuration.
**Default:** *required*.
output : bool
Whether or not to return output from the simulation.
**Default:** ``False`` does not return anything.
**Options:** ``True`` returns output.
Returns
-------
data : tuple
If ``output`` is ``True``, returns ``(pops, cells, conns, stims, simData)``
"""
from .. import sim
    (pops, cells, conns, rxd, stims, simData) = sim.create(netParams, simConfig, output=True)
sim.simulate()
if output:
return (pops, cells, conns, stims, simData)
#------------------------------------------------------------------------------
# Wrapper to create, simulate, and analyse network
#------------------------------------------------------------------------------
def createSimulateAnalyze(netParams=None, simConfig=None, output=False):
"""
    Wrapper function to create, run, and analyze a simulation
Parameters
----------
netParams : ``netParams object``
NetPyNE netParams object specifying network parameters.
**Default:** *required*.
simConfig : ``simConfig object``
NetPyNE simConfig object specifying simulation configuration.
**Default:** *required*.
output : bool
Whether or not to return output from the simulation.
**Default:** ``False`` does not return anything.
**Options:** ``True`` returns output.
Returns
-------
data : tuple
If ``output`` is ``True``, returns ``(pops, cells, conns, stims, simData)``
"""
from .. import sim
    (pops, cells, conns, rxd, stims, simData) = sim.create(netParams, simConfig, output=True)
sim.simulate()
sim.analyze()
if output:
return (pops, cells, conns, stims, simData)
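# Illustrative usage (a sketch, not part of the module): this is the usual
# one-call entry point once netParams and simConfig are defined.
#
#   from netpyne import sim
#   sim.createSimulateAnalyze(netParams=netParams, simConfig=simConfig)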
#------------------------------------------------------------------------------
# Wrapper to create, simulate, and analyse network, while saving to master in intervals
#------------------------------------------------------------------------------
def createSimulateAnalyzeInterval(netParams, simConfig, output=False, interval=None):
"""
Wrapper function to run a simulation saving data at time intervals
Parameters
----------
netParams : ``netParams object``
NetPyNE netParams object specifying network parameters.
**Default:** *required*.
simConfig : ``simConfig object``
NetPyNE simConfig object specifying simulation configuration.
**Default:** *required*.
output : bool
Whether or not to return output from the simulation.
**Default:** ``False`` does not return anything.
**Options:** ``True`` returns output.
interval : number
The time interval (in ms) to record for.
**Default:** ``None`` records the entire simulation in one file.
**Options:** ``number`` records the simulation into multiple files split at ``number`` ms.
Returns
-------
data : tuple
If ``output`` is ``True``, returns ``(pops, cells, conns, stims, simData)``
"""
import os
from .. import sim
    (pops, cells, conns, rxd, stims, simData) = sim.create(netParams, simConfig, output=True)
try:
if sim.rank==0:
if os.path.exists('temp'):
for f in os.listdir('temp'):
os.unlink('temp/{}'.format(f))
else:
os.mkdir('temp')
sim.intervalSimulate(interval)
except Exception as e:
print(e)
return
sim.pc.barrier()
sim.analyze()
if output:
return (pops, cells, conns, stims, simData)
#------------------------------------------------------------------------------
# Wrapper to create, simulate, and analyse network, saving data in each node
#------------------------------------------------------------------------------
def createSimulateAnalyzeDistributed(netParams, simConfig, output=False, filename=None, dataDir=None, includeLFP=True):
"""
Wrapper function to run a simulation saving data in each node
Parameters
----------
netParams : ``netParams object``
NetPyNE netParams object specifying network parameters.
**Default:** *required*.
simConfig : ``simConfig object``
NetPyNE simConfig object specifying simulation configuration.
**Default:** *required*.
output : bool
Whether or not to return output from the simulation.
**Default:** ``False`` does not return anything.
**Options:** ``True`` returns output.
filename : str
name of saved data files.
**Default:** ``None`` uses the name of the simulation.
dataDir : str
name of directory to save data to.
**Default:** ``None`` uses the simulation name.
includeLFP : bool
whether or not to include LFP data
**Default:** ``True`` includes LFP data if available.
Returns
-------
data : tuple
If ``output`` is ``True``, returns ``(pops, cells, conns, stims, simData)``
"""
import os
from .. import sim
    (pops, cells, conns, rxd, stims, simData) = sim.create(netParams, simConfig, output=True)
sim.runSim()
sim.saveDataInNodes(filename=filename, saveLFP=includeLFP, removeTraces=False, dataDir=dataDir)
sim.gatherDataFromFiles(gatherLFP=includeLFP, dataDir=dataDir)
sim.saveData()
sim.analysis.plotData()
if output:
return (pops, cells, conns, stims, simData)
#------------------------------------------------------------------------------
# Wrapper to load all, ready for simulation
#------------------------------------------------------------------------------
def load(filename, simConfig=None, output=False, instantiate=True, instantiateCells=True, instantiateConns=True, instantiateStims=True, instantiateRxD=True, createNEURONObj=True):
"""
Wrapper function to load a simulation from file
Parameters
----------
filename : str
name of data file to load.
**Default:** *required*.
simConfig : ``simConfig object``
NetPyNE simConfig object specifying simulation configuration.
**Default:** ``None`` uses the current ``simConfig``.
output : bool
whether or not to return output from the simulation.
**Default:** ``False`` does not return anything.
**Options:** ``True`` returns output.
instantiate : bool
whether or not to instantiate the model.
**Default:** ``True`` instantiates the model.
instantiateCells : bool
whether or not to instantiate the cells.
**Default:** ``True`` instantiates the cells.
instantiateConns : bool
whether or not to instantiate the connections.
**Default:** ``True`` instantiates the connections.
instantiateStims: bool
whether or not to instantiate the stimulations.
**Default:** ``True`` instantiates the stimulations.
instantiateRxD : bool
whether or not to instantiate the reaction-diffusion.
**Default:** ``True`` instantiates the reaction-diffusion.
createNEURONObj : bool
whether or not to create NEURON objects for the simulation.
**Default:** ``True`` creates NEURON objects.
Returns
-------
data : tuple
If ``output`` is ``True``, returns (pops, cells, conns, stims, rxd, simData)
"""
from .. import sim
sim.initialize() # create network object and set cfg and net params
sim.cfg.createNEURONObj = createNEURONObj
sim.loadAll(filename, instantiate=instantiate, createNEURONObj=createNEURONObj)
if simConfig: sim.setSimCfg(simConfig) # set after to replace potentially loaded cfg
if len(sim.net.cells) == 0 and instantiate:
pops = sim.net.createPops() # instantiate network populations
if instantiateCells:
cells = sim.net.createCells() # instantiate network cells based on defined populations
if instantiateConns:
conns = sim.net.connectCells() # create connections between cells based on params
if instantiateStims:
stims = sim.net.addStims() # add external stimulation to cells (IClamps etc)
if instantiateRxD:
rxd = sim.net.addRxD() # add reaction-diffusion (RxD)
simData = sim.setupRecording() # setup variables to record for each cell (spikes, V traces, etc)
if output:
try:
return (pops, cells, conns, stims, rxd, simData)
except:
pass
#------------------------------------------------------------------------------
# Wrapper to load net and simulate
#------------------------------------------------------------------------------
def loadSimulate(filename, simConfig=None, output=False):
"""
Wrapper function to load a simulation from file and simulate it
Parameters
----------
filename : str
name of data file to load.
**Default:** *required*.
simConfig : ``simConfig object``
NetPyNE simConfig object specifying simulation configuration.
**Default:** ``None`` uses the current ``simConfig``.
output : bool
whether or not to return output from the simulation.
**Default:** ``False`` does not return anything.
**Options:** ``True`` returns output.
Returns
-------
data : tuple
If ``output`` is ``True``, returns (pops, cells, conns, stims, rxd, simData)
"""
from .. import sim
sim.load(filename, simConfig)
sim.simulate()
if output:
try:
return (pops, cells, conns, stims, rxd, simData)
except:
pass
#------------------------------------------------------------------------------
# Wrapper to load net, simulate and analyse
#------------------------------------------------------------------------------
def loadSimulateAnalyze(filename, simConfig=None, output=False):
"""
    Wrapper function to load a simulation from file, then simulate and analyze it
Parameters
----------
filename : str
name of data file to load.
**Default:** *required*.
simConfig : ``simConfig object``
NetPyNE simConfig object specifying simulation configuration.
**Default:** ``None`` uses the current ``simConfig``.
output : bool
whether or not to return output from the simulation.
**Default:** ``False`` does not return anything.
**Options:** ``True`` returns output.
Returns
-------
data : tuple
If ``output`` is ``True``, returns ``(pops, cells, conns, stims, simData)``
"""
from .. import sim
sim.load(filename, simConfig)
sim.simulate()
sim.analyze()
if output:
try:
return (pops, cells, conns, stims, rxd, simData)
except:
pass
#------------------------------------------------------------------------------
# Wrapper to create and export network to NeuroML2
#------------------------------------------------------------------------------
def createExportNeuroML2(netParams=None, simConfig=None, output=False, reference=None, connections=True, stimulations=True, format='xml'):
"""
    Wrapper function to create and export a NeuroML2 simulation
Parameters
----------
netParams : ``netParams object``
NetPyNE netParams object specifying network parameters.
**Default:** *required*.
simConfig : ``simConfig object``
NetPyNE simConfig object specifying simulation configuration.
**Default:** *required*.
output : bool
Whether or not to return output from the simulation.
**Default:** ``False`` does not return anything.
**Options:** ``True`` returns output.
reference : str
Will be used for id of the network
connections : bool
Should connections also be exported?
**Default:** ``True``
stimulations : bool
Should stimulations (current clamps etc) also be exported?
**Default:** ``True``
format : str
Which format, xml or hdf5
**Default:** ``'xml'``
**Options:** ``'xml'`` Export as XML format ``'hdf5'`` Export as binary HDF5 format
Returns
-------
data : tuple
If ``output`` is ``True``, returns (pops, cells, conns, stims, rxd, simData)
"""
from .. import sim
import __main__ as top
if not netParams: netParams = top.netParams
if not simConfig: simConfig = top.simConfig
sim.initialize(netParams, simConfig) # create network object and set cfg and net params
pops = sim.net.createPops() # instantiate network populations
cells = sim.net.createCells() # instantiate network cells based on defined populations
conns = sim.net.connectCells() # create connections between cells based on params
stims = sim.net.addStims() # add external stimulation to cells (IClamps etc)
rxd = sim.net.addRxD() # add reaction-diffusion (RxD)
simData = sim.setupRecording() # setup variables to record for each cell (spikes, V traces, etc)
    sim.exportNeuroML2(reference, connections, stimulations, format) # export cells and connectivity to NeuroML 2 format
if output:
return (pops, cells, conns, stims, rxd, simData)
#------------------------------------------------------------------------------
# Wrapper to import network from NeuroML2
#------------------------------------------------------------------------------
def importNeuroML2SimulateAnalyze(fileName, simConfig):
"""
Wrapper function to import, simulate, and analyze from a NeuroML2 file
Parameters
----------
filename : str
name of data file to load.
**Default:** *required*.
simConfig : ``simConfig object``
NetPyNE simConfig object specifying simulation configuration.
**Default:** *required*.
"""
from .. import sim
return sim.importNeuroML2(fileName, simConfig, simulate=True, analyze=True)
def runSimIntervalSaving(interval=1000):
"""
Wrapper function to run a simulation while saving data at intervals
"""
from .. import sim
sim.runSimWithIntervalFunc(interval, sim.intervalSave)
|
Neurosim-lab/netpyne
|
netpyne/sim/wrappers.py
|
Python
|
mit
| 19,548
|
[
"NEURON"
] |
e7b0d1772803249d54c3483b77552be7ead4ea86c9a11c7ed5af90871a2ba7a6
|
# coding: utf8
from __future__ import unicode_literals, print_function, division
from unittest import TestCase
from cdk.scripts.util import Headword, yield_variants, yield_examples
class HeadwordTests(TestCase):
def test_Headword(self):
w = Headword('ambel <rus.>')
self.assertEqual(w.donor, 'rus')
w = Headword('anát-qodes (nket., sket. anát-qɔrεs, cket. anát-qɔdεs)')
self.assertEqual(w.form, 'anát-qodes')
self.assertEqual(w.dialects, [])
self.assertIn('sket', w.variants)
w = Headword('aduŋu I (nket. aruŋ, cket. aduŋu, sket. aruŋu)')
self.assertEqual(w.disambiguation, 'I')
w = Headword('albed1 (cket. alʲəbɛt) III')
self.assertEqual(w.dialects, [])
self.assertIn('cket', w.variants)
self.assertEqual(w.disambiguation, '1 III')
w = Headword('albed also something also else (cket. alʲəbɛt)')
self.assertEqual(len(w.variants[None]), 2)
w = Headword('estij (cket. ε(j)štij) I')
self.assertEqual(w.variants['cket'], ['ε(j)štij'])
w = Headword('boltaq1 (nket.)')
self.assertEqual(w.dialects, ['nket'])
def test_examples(self):
s = "kel. kinij aqta ā сегодня сильная жара, kel. sʲīlʲɛ ā летом жара, kel. " \
"ugbinut adiŋalʲ потеряла сознание от жары, bak. ā baŋga dɨnlitdiŋta ɛnam" \
" во время жары в еловом лесу прохладно kinij qɔŋa qà ā, kεˀt bǝ̄nʲ " \
"dilsivɛt cегодня невыносимая жара и духота, человек не вздохнёт (КФТ: 82) "
l = list(yield_examples(s))
self.assertEqual(set(o[0] for o in l if o[0] is not None), {'kel', 'bak'})
self.assertEqual(l[-1][3], 'КФТ')
self.assertEqual(l[-1][4], '82')
self.assertEqual(l[-1][2], 'cегодня невыносимая жара и духота, человек не вздохнёт')
s = "sur. bāt aːtɔʁɔn, dēsʲ ā rʲa-haqtɔlʲaŋ лоб вспотел, глаза пот ослепил," \
" sul. adiŋta kʌma hʌˀq tabdaq в поту (их шерсть) преет [выпадает], kel." \
" bū ā aːtɔʁɔn-qɔn (t)lɔvεrɔlʲbεt он до пота [пока пот не пошёл] работал" \
" tɨvak bʌjbulʲ āt indaq, āt ā kʌma dabbεt пучок [косичку] стружки дай" \
" мне, я пот вытру (СНСС76: 11), sur. ā atpadaq batatdiŋɛl пот льёт с" \
" лица (ЛЯНС11: 456) "
l = list(yield_examples(s))
self.assertEqual(set(o[3] for o in l if o[3] is not None), {'СНСС76', 'ЛЯНС11'})
s = "sur. kinij ā iˀ сегодня жаркий день, sul. εnqɔŋ iˀ atusʲ сегодня день " \
"жаркий qasέŋ aqtam, ʌtnnaŋta qasέŋ aɣam, ūlʲ aːŋam там хорошо, у нас " \
"там жарко, вода тёплая (СНСС81: 52)"
l = list(yield_examples(s))
s = "kel. abεskij dɛˀŋ ɔna diːmεsin блуждающие [заблудившиеся] люди только " \
"пришли, kel. āt abεskij sɛ̀lʲ dɔːnbʌk я заблудившегося оленя нашёл " \
"sul. abɛskij kʲεˀt заблудившийся человек (АК1: 12б)"
l = list(yield_examples(s))
self.assertEqual(set(o[0] for o in l if o[0] is not None), {'kel', 'sul'})
self.assertEqual(set(o[3] for o in l if o[3] is not None), {'АК1'})
s = "kel. āt utpaɣan я слепая, " \
"kel. āt dassanɔɣavεt я охочусь, " \
"kel. abaŋa ɨ̄nʲ qimdɨlʲgat у меня двое девочек, " \
"kel. ukuŋa aslɛnaŋ usʲaŋ? – abaŋa usʲaŋ, aqta aslɛnaŋ у тебя лодка есть? – у меня есть, хорошая лодка, " \
"bak. abaŋa aqtam, ǝ̄k kiːnbεsʲin мне хорошо, (что) вы пришли, " \
"kur. ūlʲ abaŋa bǝ̄nʲ (k)qʌtsʲigɛt? воды мне не дашь? " \
"kur. āb bisʲɛp abaŋa qānʲ durɔq мой брат ко мне пусть прилетит, " \
"kel. abaŋa ana nara? мне кто нужен? pak. idiŋ abaŋa bʌnʲsʲaŋ daŋal писем мне нет от него, " \
"kur. abaŋta kʌˀt usʲaŋ у меня дети есть, " \
"kur. abaŋta dɔˀŋ hunʲaŋ ovɨlda у меня три дочери было, " \
"kur. āt (t)kajnam hɔlaq, patrɔ́naŋ abaŋta usʲaŋ я взял порох, патроны у меня есть, " \
"bak. lɔbɛt abaŋta baˀt ɔnʲaŋ работы у меня, правда, много, " \
"bak. abaŋta tʌˀ kɔbda-qɔ у меня соли пригоршня, " \
"pak. abaŋta ɔbɨlʲda qīp у меня был дед, " \
"kel. abaŋta qɔˀk huˀnʲ у меня одна дочь, " \
"pak. tīp abaŋta diːmbεsʲ собака ко мне пришла, " \
"kur. abaŋal dɔˀŋ dɨlʲgat от меня трое детей, " \
"sul. āb bisʲɛp abaŋal aqtarʲa моя сестра меня лучше, " \
"bak. ə̄k abat (k)sʲaŋsiɣɛtin? вы меня ищете? " \
"kur. hissɛj abat iʁusʲ лес для меня дом, " \
"sur. diːmbεsʲ adas он пришёл со мной"
l = list(yield_examples(s))
s = "sul. āb arʲεŋ мои кости, " \
"pak. qūsʲ aˀt одна кость, " \
"kel. aˀt qusʲam кость одна, " \
"pak. qāk adεŋ пять костей, " \
"kel. qà aˀt большая кость, " \
"kel. aˀt qàsʲ кость большая, " \
"kel. ilʲiŋ aˀt обглоданная кость, " \
"leb. aˀt ilʲ кость грызи, " \
"kur. qɔbɛt aˀt спинной хребет [кость], " \
"kur. bɔŋda arεŋ мертвеца скелет [кости], " \
"kel. hʌŋnd aˀt плечевой сустав [плеча кость], " \
"kel. ɨlʲgat(d) aˀt ключица, " \
"sul. ɔkdaŋtan arʲεŋ bʌnsʲa у стерляди костей нет, " \
"kel. āt ulʲbaɣɔlʲta, barεŋ binʲtʌːlʲ я промок под дождем, промёрз до костей [кости мои замерзли], " \
"sur. būŋ tusʲaŋ dʌʁaŋgɔʁɔn buŋna dεŋna adεŋdiŋta они там жить стали, где кости их людей " \
"sʲī haj aɣa ɔɣɔn, daɔbda adεŋdiŋa haj (t)tɔlatn ночью он снова на гору [вверх на берег в лес] ушёл, на кладбище [к костям] своего отца, снова лёг (КСД: 35)"
l = list(yield_examples(s))
s = "аl. buda aˀt его рост, " \
"pak. báàt bǝ̄nʲ qà aˀt старик небольшого роста, " \
"mad. tur báàt tɨŋalʲ aˀt этот старик высокого роста, " \
"kur. tur báàt bǝ̄nʲ ugda aˀt этот старик небольшого [не длинного] роста, " \
"kur. bū sʲutn aˀt он среднего роста, " \
"kur. bū hʌna aˀt он маленького роста, " \
"sur. εjɣε bɔŋsʲúːlʲ (t)biːlεbεt bind atdas он железный гроб [мертвячью нарту] сделал в свой рост"
l = list(yield_examples(s))
s = "dɔlʲdin vasʲka qimas àl sɛnnusʲdiŋta жили Васька с женой в лесу в оленьем сарае (КФТ: 29) " \
"dɔlʲdin vasʲka qimas àl sɛnnusʲdiŋta жили Васька с женой в лесу в оленьем сарае (КФТ: 29), " \
"sur. lɛska àl tam ana qɛ̀ dɛːsij, dɛˀŋ aŋgábdǝ в лесу кто-то громким голосом кричит, люди услыхали (ЛЯНС11: 154)"
l = list(yield_examples(s))
self.assertEqual(set(o[0] for o in l if o[0] is not None), {'sur'})
self.assertEqual(set(o[3] for o in l if o[3] is not None), {'КФТ', 'ЛЯНС11'})
s = "kel. tɨˀn àl usʲna котёл прочь сними (с огня), " \
"kel. àl εsʲandaq подальше положи, " \
"kel. àl εsʲandaq, qɨ̄nʲ aqán da-bugbiʁus положи подальше, течение чтобы не унесло [пусть не унесет] " \
"imb. àl da-quska da-qimn sɛtɔŋna а там в чуме его жены узнали (КСб: 181)"
l = list(yield_examples(s))
s = "kur. āb anun мой ум, " \
"sul. anunan kʌjga бестолковая без ума] голова, " \
"sur. budaŋtan anunaŋ bʌntʲaŋ у него разума нет, " \
"pak. abaŋta aqta anun у меня хороший ум, " \
"sur. āt (t)bilʲεbεt bindεp anundasʲ я сделал своим умом, " \
"kel. anunan kεˀt – dajεŋ kεˀt безумец [без ума человек] – больной человек, " \
"kel. ūk kʌjga anun u bʌnsʲaŋ у тебя в голове ум есть или нет, " \
"kel. anunan kεtdiŋta dɨlʲgat daʁalεjin над безумцем потешались смеялись] дети, " \
"kel. qɔˀk kεdda anundiŋa turʲɛ bǝ̄nʲ da-íksʲibεsʲ одному человеку это на ум не придёт, " \
"kur. āb anundasʲ по моему мнению, " \
"bak. bǝ̄nʲ āb anundas не по своей воле " \
"ver. anun dʌkájnɛm она взялась за ум (КФТ: 63), " \
"asʲka qaɣεt datɔnɔq, anun daŋta bʌnʲsʲa ɔbɨlʲda когда старым стал, ума у него не стало (КФТ: 29)"
l = list(yield_examples(s))
self.assertEqual(len(l), 13)
s = "kur. būŋ hʌnʲunʲaŋ они маленькие, " \
"buŋnaŋa ɔnʲa sʲɨkŋ? им сколько лет? " \
"būŋ “ʌtna nʲɛmsʲaŋ” ɔvɨlʲdɛn они «нашими немцами» были (ПМБ: 252), " \
"sʲulʲtu kàlʲ ēnʲ ɔvɨlʲdɛ была теперь кровавая война (ПМБ: 254), " \
"buttɔ būŋ bɛˀk bʌnʲsʲaŋ sʲɛ́ɛ̀ŋ будто они здесь никогда не были (ПМБ: 261), " \
"būŋ ɛk lʲʌʁɛsʲaŋ dimbɛsʲin, ʌtna qɔkŋdiŋ dimbɛsʲin tunʲɛ súran-qáŋnʲiŋ-dɛˀŋ они пришли лишь за пушниной, в бор наш они пришли эти люди полуденных гор (ПМБ: 213), " \
"qájɛ qálnas qíbdaŋta ʌtna dɛˀŋ dimbɛsʲin qúkdiŋ, járɔmkadiŋ būŋ dimbɛsʲin потом в месяце сбора налога [июне] наши люди пришли на Енисей, на ярмарку они пришли (ПМБ: 214), " \
"diˑmbɛsʲin sʲēlʲ dʌqdiŋ būŋ, diˑmbɛsʲin bənʲ áqta qá:nʲdiŋ пришли к непристойной жизни, пришли к нехорошим словам (ПМБ: 215)"
l = list(yield_examples(s))
self.assertEqual(len(l), 8)
s = "dūɣ dɨ̄lʲ кричащий ребёнок, " \
"dūɣ tʲīpʲ лающая собака, " \
"kel. qusʲd hɨjga dūɣ dɛˀŋ duɣan в чуме шумные [кричащие] люди сидят, " \
"kel. kirʲ dūɣ kɛˀt ʌɣa t-kaujak этот шумный [кричащий] человек сюда зашёл, " \
"kel. tūrʲ asɛsʲ dɨ̄lʲ, dɨ̄lʲ duɣsʲ это какой ребёнок, это ребёнок шумный"
l = list(yield_examples(s))
self.assertEqual(len(l), 5)
s = "sur. bū tɔˀn d-buŋsɔʁɔ, ɛta qɔrʲa ɨ̄n saːlaŋ dugdɛ bə̄nʲ bīn tɔːlɔʁut он так выглядит, как будто две ночи подряд не спал, " \
"kinij tɔˀn ā ɛta qɔrʲa sʲīl сегодня такая жара как будто летом, " \
"mad. bū ra-ɛsʲɔlɛj, ɛta qɔrʲa ə̄t ɔgdɛnan она кричала, как будто мы глухие [без ушей], " \
"mad. ū ɛta qɔrʲa bīn bə̄nʲ itkum ты как будто сам не знаешь, " \
"mad. ɛta qɔrʲa āt itparɛm turʲɛ bɛsʲa ɔbɨlda кажется, я знаю это кто был, " \
"bak. iˀlʲ qɔda kɛtda hū песня как будто человека сердце, " \
"bak. tɔˀn aqta dubil, qɔda kɛˀt dahudil da-kásɔnam так хорошо поёт, как будто человека за его сердце берёт"
l = list(yield_examples(s))
self.assertEqual(len(l), 7)
s = "sul. iŋɔlt qusʲam шкура одна, " \
"kur. ulʲtu iŋɔlt сырая шкура, " \
"mad. tū iŋɔlʲta несушёная шкура, " \
"kur. dàŋ iŋɔlt выделанная [мятая] шкура, " \
"kur. hʌlat iŋɔlt замша [ровдужная шкура], " \
"kur. saqda iŋɔlʲt шкура белки-самца, " \
"mad. ɔ̀nʲ saːnna iŋɔlʲtɛŋ много шкур белок, " \
"kur. sεlεda iŋɔlʲt оленья шкура, " \
"kur. kusna iŋɔltɛŋ коровьи шкуры, " \
"kur. sʲīlʲ ɔllasda iŋɔltə пыжик [шкура летнего (новорождённого) телёнка], " \
"kur. sʲεlʲda bulʲaŋd iŋɔlt камус [шкура с ног оленя], " \
"sur. kulʲapda iŋɔlt шкура горностая, " \
"kel. tiɣda iŋɔlʲt змеиная шкура, " \
"pak. āt kunda iŋɔlt dʲεpqɔlʲdɔnʲ я росомахи шкуру снял, " \
"sur. aʲvaŋta kiˀ iŋɔltə bʌnʲsʲaŋ у меня новой пушнины [звериных шкур] нет, " \
"kel. ēnʲ kə̄t assanɔ kεˀt assεnna iŋɔltaŋ qɔmat diɣunbεsʲ этой зимой охотник пушнины [звериных шкур] принёс мало " \
"kel. qima sεlʲda iŋɔlʲt dʌvrʲaŋ бабушка мнёт оленью шкуру, " \
"sul. ʌlʲd iŋɔlt irʲiŋuksʲat у лягушки шкура узорчатая, " \
"sul. iŋɔlt(d) ʌːta āt ditaʁut я сплю на шкуре"
l = list(yield_examples(s))
self.assertEqual(len(l), 19)
s = "mad. turʲɛ kɛˀt qɔnɔksʲ dʌqta ra-tasʲiŋavɛt вот этот человек (женщина) утром рано [быстро] встаёт, " \
"kel. tūrʲ kɛˀt āb ōp этот человек мой отец, " \
"mad. turʲɛ dɨ̄lʲ bə̄nʲ āb hɨˀp этот ребёнок не мой сын, " \
"mad. turʲɛ kɛrʲa lə̄q этого человека пушнина, " \
"bak. ū baˀt tudɛ bə̄nʲ (k)tɔbinʲgij ты правда это не говорил, " \
"mad. turɛdiŋa ōksʲ hʌninsʲa ɔvílda у этого (капкана) палка маленькая была, " \
"mad. tūrʲɔ qɔtá najarij вот он [тот] впереди шевелится, " \
"kel. turʲɛ bə̄nʲ ʌtna kuˀsʲ это не наша корова, " \
"kel. turʲɛ aksʲ tunbisʲ? – qīmd súùlʲ, tū sʲuːʲlʲd ʌ́ʌ̀t qimn (t)tɔlʲaŋɢɔtin это что такое? – женская нарта, на такой нарте женщины ездят, " \
"kel. tū bitsʲɛ? это кто (о мужчине)? " \
"kel. tū tɔˀn tɨŋalʲam вот настолько высоко, " \
"kel. turʲɛ tavut, ūk ɨlʲɣa, bə̄nʲ kutɔŋ это лежит, возле тебя, не видишь, " \
"kel. tunɛ dɛˀŋ inʲam dɔlʲdɛɣin эти люди давно жили, " \
"kel. tunɛ dɛˀŋ utisʲ dɛˀŋ эти люди родственники, " \
"kel. tū kʌnʲdaŋ dεˀŋ dεˀŋ (d)pɔsɔbarɔŋɔbεtin эти добрые люди людям помогают, " \
"kel. hɨlʲ turɔ́ вон он! " \
"kel. hɨlʲ tunɛ dɛˀŋ araŋɔt эти вот люди болеют " \
"sur. tuda īsʲ nado toʁajaŋɢat эту рыбу сушить надо (ЛЯНС41: 250), " \
"pak. usʲka diːnbɛs, (d)buŋsɔʁɔ – tudʌ buŋna kaˀt baŋŋusʲ hapta, tudʌ kaˀt sɨˀk домой (он) пришёл, смотрит – это их старая землянка стоит, это старое корыто (КФТ: 19)"
l = list(yield_examples(s))
#for ll in l:
# print('%s %s %s %s %s' % tuple(ll))
self.assertEqual(len(l), 19)
s = "danʲáptɛt я строгаю это, danʲabílʲtɛt я строгал это "
l = list(yield_examples(s))
self.assertEqual(len(l), 2)
s = "kel. qīp thitlut iʁɔt dahɔ́lɛtɛsʲ месяц сел, солнце встало " \
"sket. qīp thitsut [thitsuʁut] луна заходит (WER1: 317), " \
"cket., nket. thɛtsɔʁɔt он заходит (1b : 28) (WER1: 317), " \
"sket. ī dahitsut [dahitsuʁut] солнце заходит (WER1: 317), " \
"cket., nket. dahɛ́tsɔʁɔt она заходит (WER1: 317), " \
"diˑmbɛsʲin bīk dɛˀŋ hāj biksʲa, itlʲan baŋdiŋalʲ dimbɛsʲin, ī dahítsut baŋdiŋalʲ пришли чужие люди снова, из неведомой страны пришли, из страны, в которой солнце заходит (ПМБ: 214)"
l = list(yield_examples(s))
self.assertEqual(set(o[0] for o in l if o[0] is not None), {'kel', 'sket', 'cket', 'nket'})
s = "kel. buŋtɛt kɛˀt sʲēlʲ bilbɛt глупый человек плохо сделал, " \
"kel. buŋtɛt hīɣ ʌtna tān dɛjsɔʁɔt дурной мужик на нас ругается, " \
"kel. ʌtna kɛˀt buŋtɛtsʲ наш человек чокнутый [глупый] " \
"manʲmaŋ, ə̄tn darʲij dɛˀŋ, buttɔ ə̄tn buŋtɛt dɛˀŋ говорят, мы дураки, будто мы глупые люди (ПМБ: 261), " \
"haj at anʲɛŋilʲgɛt tɔˀnʲ, butta bʌˀj kʌˀ-qɔlʲɛpkaru uɣil sʲɛlʲdu, buŋtɛtdu и не думай так, будто друг там за рекой тебя хуже, глупей (ПМБ: 230)"
l = list(yield_examples(s))
self.assertEqual(len(l), 5)
s = "bū qusʲ-t hìj dujutɔ он чум ставить собирается, " \
"bū quˀsʲ kisʲɛ̀ŋ hij-εsʲaŋ dutabak он чум здесь ставить собирается, " \
"quˀsʲ kisʲɛ́ŋ hij-εsʲaŋ daqɔˀj он чум здесь ставить хочет " \
"hij-ɛsʲaŋ quˀŋ nada qajga чумы надо ставить на яру (ПМБ: 203)"
l = list(yield_examples(s))
self.assertEqual(len(l), 4)
s = "bak. qūsʲ ē одно железо, " \
"kur. tarɛ ē кованое [битое] железо, " \
"sul. áàŋ ē горячее железо, " \
"sul. ē aːŋam железо горячее, " \
"kel. turʲə ē aːŋsʲ это железо горячее, " \
"sul. kɨlʲtɛt ē кованое железо, " \
"sul. ē kɨltɛts железо кованное, " \
"bak. kɔlɛtdiŋta tʌŋdiŋal, ēdiŋal ɛŋŋuŋ dεˀŋ dubbɛtin в городе из камня, из железа дома люди делают " \
"hʌtnuraŋdiŋt ē dusʲqimnʲan в плавильнях железо выплавляли (ПМБ: 243), " \
"āt huːlasʲ (t)kɨlʲdavintɛt aʁatld ʌʁat ē я железо молотком кую [бью] на наковальне (СНСС72: 126), " \
"pak. ɛd dūɣ стрельба [крик железа] "
l = list(yield_examples(s))
#for ll in l:
# print('%s | %s | %s | %s | %s' % tuple(ll))
self.assertEqual(len(l), 11)
s = "sul. qūsʲ asʲpulʲ одно облако, " \
"sul. asʲvulʲ qusʲam облако одно, " \
"sur. ulʲεsʲ aspulʲ дождевое облако, " \
"el. tum aspulʲ чёрная туча, " \
"kur. quŋtεt aspulʲ грозовая туча, " \
"kur. ēkŋ asʲpul εsavut грозовая туча поднимается, " \
"kel. āt asʲbulʲ ditɔŋ я тучу вижу, " \
"sul. aspulaŋ bʌnsʲaŋ туч нет, " \
"sul. asʲpulʲdiŋalʲ ulʲata из тучи дождь идёт, " \
"kel. ulʲɛsʲ aspulʲ arʲɛn tɔsa qɔlʲapka aŋapta дождевое облако над лесом висит, " \
"bak. hʌlatbεsʲ aspulaŋ ɔŋɔt по небу облака идут, " \
"kel. asʲpulʲ bēj da-bugbit облако ветром несёт, " \
"kel. qimdɨlʲ aspul da-kɔlʲdɔ девочка на облако смотрела, " \
"kel. tum asʲpulʲ ʌɣa bēj da-bugbiʁɔs чёрное облако ветер сюда несёт " \
"ēkŋ qām duɣaŋgɔʁan, qat qarʲuːn, aspulʲaŋ utal ēsʲ (t)kajnamin гроза скоро начнется, посмотрите, тучи обложили всё небо (СНСС72: 147), " \
"quŋlɔɣin ʌla, aksʲ ǝ̄k bǝ̄nʲ kutɔɣin ulεstu aspulʲ? посмотрите наружу, разве вы не видите грозовую тучу? (СНСС72: 151)"
l = list(yield_examples(s))
#for ll in l:
# print('%s | %s | %s | %s | %s' % tuple(ll))
self.assertEqual(len(l), 16)
s = "sur. ūk inεŋ твои ногти, " \
"kel. qūsʲ ìn один ноготь, " \
"kel. ìn qusʲam ноготь один, " \
"sul. qaɣam inεŋ пять ногтей, " \
"sul. ìn sintuɣam ноготь грязный, " \
"kur. kεdda ìn ноготь человека, " \
"kur. tabna inεŋ когти собак, " \
"kur. sutaqd ìn ноготь среднего пальца, " \
"kur. qɔjda inεŋ когти медведя, " \
"kel. hɨˀj inεŋasʲ ùt (t)tɔɣaulʲtεt сова схватила мышь когтями, " \
"bū kɔˀp (t)kasʲɔnεm, daqɔbεtbεsʲ dεtavinʲtaŋ; kɔbda qɔbεtka qɔjda inεŋdiŋalʲ qāk tumaŋ (s)lεːdaŋ igdɔbɔn он бурундука взял, по спине погладил; от медвежьих когтей на спине бурундука пять чёрных полос [следов] осталось (СНСС81: 57), " \
"inεŋ àj небольшая сумочка из шкурок с лап соболя, выдры, росомахи [когти сумка] (К67: 117)"
l = list(yield_examples(s))
#for ll in l:
# print('%s | %s | %s | %s | %s' % tuple(ll))
self.assertEqual(len(l), 12)
s = "kel. abcd? efgh? kel. ijkl mnop"
l = list(yield_examples(s))
self.assertEqual(l[0][2], 'efgh?')
self.assertEqual(l[1][2], 'mnop')
s = "kel. hīɣ qɔˀk duɣaraq мужик один живёт, " \
"kel. bū qɔˀk kɛˀt, ariŋa duɣaraq он один [один человек], в лесу живёт, " \
"qɔˀk huˀn одна дочь (СНСС72: 83), " \
"qɔksʲadaŋtɛn ɨ̄n kʌˀt у одного (человека) двое детей (СНСС72: 83), " \
"qɔkdadiŋtan dɔˀŋ dɨlʲgat, kunsʲa qimdiŋtan qɔˀk dɨ̄lʲ у одной было трое детей, у другой женщины один ребёнок (СНСС81: 40), " \
"qɔˀk qīm qā daigdɔʁɔn одна баба дома осталась (СНСС72: 139), " \
"āt qɔˀk kɛˀt digdɔʁɔn я одна осталась (СНСС72: 107), " \
"qɔˀk ɔstɨk i qɔˀk hʌmga один кет и один эвенк (CHCC81ː 44), " \
"pak. qɔˀk saˀq bīk ɔksʲdaŋa da-ɛtʲditnam другая [одна] белка соскочила на другое дерево (КФТ: 55)"
l = list(yield_examples(s))
#for ll in l:
# print('%s | %s | %s | %s | %s' % tuple(ll))
#
# FIXME: incorrect parsing of
# qɔˀk ɔstɨk i qɔˀk hʌmga один кет и один эвенк (CHCC81ː 44)
#
self.assertEqual(len(l), 9)
def test_variants(self):
l = list(yield_variants('sket.'))
self.assertEqual(l, [('sket', None)])
l = list(yield_variants('sket., nket.'))
self.assertEqual(l, [('sket', None), ('nket', None)])
l = list(yield_variants('sket. abc, nket. def'))
self.assertEqual(l, [('sket', 'abc'), ('nket', 'def')])
l = list(yield_variants('sket., nket. abc, cket. def'))
self.assertEqual(l, [('sket', 'abc'), ('nket', 'abc'), ('cket', 'def')])
l = list(yield_variants('nket. a, cket. b, c'))
self.assertEqual(l, [('nket', 'a'), ('cket', 'b'), ('cket', 'c')])
l = list(yield_variants('nket. a, sket., cket. b, c'))
self.assertEqual(l, [('nket', 'a'), ('sket', 'b'), ('cket', 'b'), ('sket', 'c'), ('cket', 'c')])
|
clld/cdk
|
cdk/tests/test_import.py
|
Python
|
apache-2.0
| 26,718
|
[
"CDK"
] |
c2773d1087157f3faa1a429b56be423f84448e149c4ac83a3d4c4fde2a357987
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# list of words from http://en.wiktionary.org/wiki/Wiktionary:Frequency_lists/Contemporary_poetry
words = [
"like", "just", "love", "know", "never", "want", "time", "out", "there",
"make", "look", "eye", "down", "only", "think", "heart", "back", "then",
"into", "about", "more", "away", "still", "them", "take", "thing", "even",
"through", "long", "always", "world", "too", "friend", "tell", "try",
"hand", "thought", "over", "here", "other", "need", "smile", "again",
"much", "cry", "been", "night", "ever", "little", "said", "end", "some",
"those", "around", "mind", "people", "girl", "leave", "dream", "left",
"turn", "myself", "give", "nothing", "really", "off", "before", "something",
"find", "walk", "wish", "good", "once", "place", "ask", "stop", "keep",
"watch", "seem", "everything", "wait", "got", "yet", "made", "remember",
"start", "alone", "run", "hope", "maybe", "believe", "body", "hate",
"after", "close", "talk", "stand", "own", "each", "hurt", "help", "home",
"god", "soul", "new", "many", "two", "inside", "should", "true", "first",
"fear", "mean", "better", "play", "another", "gone", "change", "use",
"wonder", "someone", "hair", "cold", "open", "best", "any", "behind",
"happen", "water", "dark", "laugh", "stay", "forever", "name", "work",
"show", "sky", "break", "came", "deep", "door", "put", "black", "together",
"upon", "happy", "such", "great", "white", "matter", "fill", "past",
"please", "burn", "cause", "enough", "touch", "moment", "soon", "voice",
"scream", "anything", "stare", "sound", "red", "everyone", "hide", "kiss",
"truth", "death", "beautiful", "mine", "blood", "broken", "very", "pass",
"next", "forget", "tree", "wrong", "air", "mother", "understand", "lip",
"hit", "wall", "memory", "sleep", "free", "high", "realize", "school",
"might", "skin", "sweet", "perfect", "blue", "kill", "breath", "dance",
"against", "fly", "between", "grow", "strong", "under", "listen", "bring",
"sometimes", "speak", "pull", "person", "become", "family", "begin",
"ground", "real", "small", "father", "sure", "feet", "rest", "young",
"finally", "land", "across", "today", "different", "guy", "line", "fire",
"reason", "reach", "second", "slowly", "write", "eat", "smell", "mouth",
"step", "learn", "three", "floor", "promise", "breathe", "darkness", "push",
"earth", "guess", "save", "song", "above", "along", "both", "color",
"house", "almost", "sorry", "anymore", "brother", "okay", "dear", "game",
"fade", "already", "apart", "warm", "beauty", "heard", "notice", "question",
"shine", "began", "piece", "whole", "shadow", "secret", "street", "within",
"finger", "point", "morning", "whisper", "child", "moon", "green", "story",
"glass", "kid", "silence", "since", "soft", "yourself", "empty", "shall",
"angel", "answer", "baby", "bright", "dad", "path", "worry", "hour", "drop",
"follow", "power", "war", "half", "flow", "heaven", "act", "chance", "fact",
"least", "tired", "children", "near", "quite", "afraid", "rise", "sea",
"taste", "window", "cover", "nice", "trust", "lot", "sad", "cool", "force",
"peace", "return", "blind", "easy", "ready", "roll", "rose", "drive",
"held", "music", "beneath", "hang", "mom", "paint", "emotion", "quiet",
"clear", "cloud", "few", "pretty", "bird", "outside", "paper", "picture",
"front", "rock", "simple", "anyone", "meant", "reality", "road", "sense",
"waste", "bit", "leaf", "thank", "happiness", "meet", "men", "smoke",
"truly", "decide", "self", "age", "book", "form", "alive", "carry",
"escape", "damn", "instead", "able", "ice", "minute", "throw", "catch",
"leg", "ring", "course", "goodbye", "lead", "poem", "sick", "corner",
"desire", "known", "problem", "remind", "shoulder", "suppose", "toward",
"wave", "drink", "jump", "woman", "pretend", "sister", "week", "human",
"joy", "crack", "grey", "pray", "surprise", "dry", "knee", "less", "search",
"bleed", "caught", "clean", "embrace", "future", "king", "son", "sorrow",
"chest", "hug", "remain", "sat", "worth", "blow", "daddy", "final",
"parent", "tight", "also", "create", "lonely", "safe", "cross", "dress",
"evil", "silent", "bone", "fate", "perhaps", "anger", "class", "scar",
"snow", "tiny", "tonight", "continue", "control", "dog", "edge", "mirror",
"month", "suddenly", "comfort", "given", "loud", "quickly", "gaze", "plan",
"rush", "stone", "town", "battle", "ignore", "spirit", "stood", "stupid",
"yours", "brown", "build", "dust", "hey", "kept", "pay", "phone", "twist",
"although", "ball", "beyond", "hidden", "nose", "taken", "fail", "float",
"pure", "somehow", "wash", "wrap", "angry", "cheek", "creature",
"forgotten", "heat", "rip", "single", "space", "special", "weak",
"whatever", "yell", "anyway", "blame", "job", "choose", "country", "curse",
"drift", "echo", "figure", "grew", "laughter", "neck", "suffer", "worse",
"yeah", "disappear", "foot", "forward", "knife", "mess", "somewhere",
"stomach", "storm", "beg", "idea", "lift", "offer", "breeze", "field",
"five", "often", "simply", "stuck", "win", "allow", "confuse", "enjoy",
"except", "flower", "seek", "strength", "calm", "grin", "gun", "heavy",
"hill", "large", "ocean", "shoe", "sigh", "straight", "summer", "tongue",
"accept", "crazy", "everyday", "exist", "grass", "mistake", "sent", "shut",
"surround", "table", "ache", "brain", "destroy", "heal", "nature", "shout",
"sign", "stain", "choice", "doubt", "glance", "glow", "mountain", "queen",
"stranger", "throat", "tomorrow", "city", "either", "fish", "flame",
"rather", "shape", "spin", "spread", "ash", "distance", "finish", "image",
"imagine", "important", "nobody", "shatter", "warmth", "became", "feed",
"flesh", "funny", "lust", "shirt", "trouble", "yellow", "attention", "bare",
"bite", "money", "protect", "amaze", "appear", "born", "choke",
"completely", "daughter", "fresh", "friendship", "gentle", "probably",
"six", "deserve", "expect", "grab", "middle", "nightmare", "river",
"thousand", "weight", "worst", "wound", "barely", "bottle", "cream",
"regret", "relationship", "stick", "test", "crush", "endless", "fault",
"itself", "rule", "spill", "art", "circle", "join", "kick", "mask",
"master", "passion", "quick", "raise", "smooth", "unless", "wander",
"actually", "broke", "chair", "deal", "favorite", "gift", "note", "number",
"sweat", "box", "chill", "clothes", "lady", "mark", "park", "poor",
"sadness", "tie", "animal", "belong", "brush", "consume", "dawn", "forest",
"innocent", "pen", "pride", "stream", "thick", "clay", "complete", "count",
"draw", "faith", "press", "silver", "struggle", "surface", "taught",
"teach", "wet", "bless", "chase", "climb", "enter", "letter", "melt",
"metal", "movie", "stretch", "swing", "vision", "wife", "beside", "crash",
"forgot", "guide", "haunt", "joke", "knock", "plant", "pour", "prove",
"reveal", "steal", "stuff", "trip", "wood", "wrist", "bother", "bottom",
"crawl", "crowd", "fix", "forgive", "frown", "grace", "loose", "lucky",
"party", "release", "surely", "survive", "teacher", "gently", "grip",
"speed", "suicide", "travel", "treat", "vein", "written", "cage", "chain",
"conversation", "date", "enemy", "however", "interest", "million", "page",
"pink", "proud", "sway", "themselves", "winter", "church", "cruel", "cup",
"demon", "experience", "freedom", "pair", "pop", "purpose", "respect",
"shoot", "softly", "state", "strange", "bar", "birth", "curl", "dirt",
"excuse", "lord", "lovely", "monster", "order", "pack", "pants", "pool",
"scene", "seven", "shame", "slide", "ugly", "among", "blade", "blonde",
"closet", "creek", "deny", "drug", "eternity", "gain", "grade", "handle",
"key", "linger", "pale", "prepare", "swallow", "swim", "tremble", "wheel",
"won", "cast", "cigarette", "claim", "college", "direction", "dirty",
"gather", "ghost", "hundred", "loss", "lung", "orange", "present", "swear",
"swirl", "twice", "wild", "bitter", "blanket", "doctor", "everywhere",
"flash", "grown", "knowledge", "numb", "pressure", "radio", "repeat",
"ruin", "spend", "unknown", "buy", "clock", "devil", "early", "false",
"fantasy", "pound", "precious", "refuse", "sheet", "teeth", "welcome",
"add", "ahead", "block", "bury", "caress", "content", "depth", "despite",
"distant", "marry", "purple", "threw", "whenever", "bomb", "dull", "easily",
"grasp", "hospital", "innocence", "normal", "receive", "reply", "rhyme",
"shade", "someday", "sword", "toe", "visit", "asleep", "bought", "center",
"consider", "flat", "hero", "history", "ink", "insane", "muscle", "mystery",
"pocket", "reflection", "shove", "silently", "smart", "soldier", "spot",
"stress", "train", "type", "view", "whether", "bus", "energy", "explain",
"holy", "hunger", "inch", "magic", "mix", "noise", "nowhere", "prayer",
"presence", "shock", "snap", "spider", "study", "thunder", "trail", "admit",
"agree", "bag", "bang", "bound", "butterfly", "cute", "exactly", "explode",
"familiar", "fold", "further", "pierce", "reflect", "scent", "selfish",
"sharp", "sink", "spring", "stumble", "universe", "weep", "women",
"wonderful", "action", "ancient", "attempt", "avoid", "birthday", "branch",
"chocolate", "core", "depress", "drunk", "especially", "focus", "fruit",
"honest", "match", "palm", "perfectly", "pillow", "pity", "poison", "roar",
"shift", "slightly", "thump", "truck", "tune", "twenty", "unable", "wipe",
"wrote", "coat", "constant", "dinner", "drove", "egg", "eternal", "flight",
"flood", "frame", "freak", "gasp", "glad", "hollow", "motion", "peer",
"plastic", "root", "screen", "season", "sting", "strike", "team", "unlike",
"victim", "volume", "warn", "weird", "attack", "await", "awake", "built",
"charm", "crave", "despair", "fought", "grant", "grief", "horse", "limit",
"message", "ripple", "sanity", "scatter", "serve", "split", "string",
"trick", "annoy", "blur", "boat", "brave", "clearly", "cling", "connect",
"fist", "forth", "imagination", "iron", "jock", "judge", "lesson", "milk",
"misery", "nail", "naked", "ourselves", "poet", "possible", "princess",
"sail", "size", "snake", "society", "stroke", "torture", "toss", "trace",
"wise", "bloom", "bullet", "cell", "check", "cost", "darling", "during",
"footstep", "fragile", "hallway", "hardly", "horizon", "invisible",
"journey", "midnight", "mud", "nod", "pause", "relax", "shiver", "sudden",
"value", "youth", "abuse", "admire", "blink", "breast", "bruise",
"constantly", "couple", "creep", "curve", "difference", "dumb", "emptiness",
"gotta", "honor", "plain", "planet", "recall", "rub", "ship", "slam",
"soar", "somebody", "tightly", "weather", "adore", "approach", "bond",
"bread", "burst", "candle", "coffee", "cousin", "crime", "desert",
"flutter", "frozen", "grand", "heel", "hello", "language", "level",
"movement", "pleasure", "powerful", "random", "rhythm", "settle", "silly",
"slap", "sort", "spoken", "steel", "threaten", "tumble", "upset", "aside",
"awkward", "bee", "blank", "board", "button", "card", "carefully",
"complain", "crap", "deeply", "discover", "drag", "dread", "effort",
"entire", "fairy", "giant", "gotten", "greet", "illusion", "jeans", "leap",
"liquid", "march", "mend", "nervous", "nine", "replace", "rope", "spine",
"stole", "terror", "accident", "apple", "balance", "boom", "childhood",
"collect", "demand", "depression", "eventually", "faint", "glare", "goal",
"group", "honey", "kitchen", "laid", "limb", "machine", "mere", "mold",
"murder", "nerve", "painful", "poetry", "prince", "rabbit", "shelter",
"shore", "shower", "soothe", "stair", "steady", "sunlight", "tangle",
"tease", "treasure", "uncle", "begun", "bliss", "canvas", "cheer", "claw",
"clutch", "commit", "crimson", "crystal", "delight", "doll", "existence",
"express", "fog", "football", "gay", "goose", "guard", "hatred",
"illuminate", "mass", "math", "mourn", "rich", "rough", "skip", "stir",
"student", "style", "support", "thorn", "tough", "yard", "yearn",
"yesterday", "advice", "appreciate", "autumn", "bank", "beam", "bowl",
"capture", "carve", "collapse", "confusion", "creation", "dove", "feather",
"girlfriend", "glory", "government", "harsh", "hop", "inner", "loser",
"moonlight", "neighbor", "neither", "peach", "pig", "praise", "screw",
"shield", "shimmer", "sneak", "stab", "subject", "throughout", "thrown",
"tower", "twirl", "wow", "army", "arrive", "bathroom", "bump", "cease",
"cookie", "couch", "courage", "dim", "guilt", "howl", "hum", "husband",
"insult", "led", "lunch", "mock", "mostly", "natural", "nearly", "needle",
"nerd", "peaceful", "perfection", "pile", "price", "remove", "roam",
"sanctuary", "serious", "shiny", "shook", "sob", "stolen", "tap", "vain",
"void", "warrior", "wrinkle", "affection", "apologize", "blossom", "bounce",
"bridge", "cheap", "crumble", "decision", "descend", "desperately", "dig",
"dot", "flip", "frighten", "heartbeat", "huge", "lazy", "lick", "odd",
"opinion", "process", "puzzle", "quietly", "retreat", "score", "sentence",
"separate", "situation", "skill", "soak", "square", "stray", "taint",
"task", "tide", "underneath", "veil", "whistle", "anywhere", "bedroom",
"bid", "bloody", "burden", "careful", "compare", "concern", "curtain",
"decay", "defeat", "describe", "double", "dreamer", "driver", "dwell",
"evening", "flare", "flicker", "grandma", "guitar", "harm", "horrible",
"hungry", "indeed", "lace", "melody", "monkey", "nation", "object",
"obviously", "rainbow", "salt", "scratch", "shown", "shy", "stage", "stun",
"third", "tickle", "useless", "weakness", "worship", "worthless",
"afternoon", "beard", "boyfriend", "bubble", "busy", "certain", "chin",
"concrete", "desk", "diamond", "doom", "drawn", "due", "felicity", "freeze",
"frost", "garden", "glide", "harmony", "hopefully", "hunt", "jealous",
"lightning", "mama", "mercy", "peel", "physical", "position", "pulse",
"punch", "quit", "rant", "respond", "salty", "sane", "satisfy", "savior",
"sheep", "slept", "social", "sport", "tuck", "utter", "valley", "wolf",
"aim", "alas", "alter", "arrow", "awaken", "beaten", "belief", "brand",
"ceiling", "cheese", "clue", "confidence", "connection", "daily",
"disguise", "eager", "erase", "essence", "everytime", "expression", "fan",
"flag", "flirt", "foul", "fur", "giggle", "glorious", "ignorance", "law",
"lifeless", "measure", "mighty", "muse", "north", "opposite", "paradise",
"patience", "patient", "pencil", "petal", "plate", "ponder", "possibly",
"practice", "slice", "spell", "stock", "strife", "strip", "suffocate",
"suit", "tender", "tool", "trade", "velvet", "verse", "waist", "witch",
"aunt", "bench", "bold", "cap", "certainly", "click", "companion",
"creator", "dart", "delicate", "determine", "dish", "dragon", "drama",
"drum", "dude", "everybody", "feast", "forehead", "former", "fright",
"fully", "gas", "hook", "hurl", "invite", "juice", "manage", "moral",
"possess", "raw", "rebel", "royal", "scale", "scary", "several", "slight",
"stubborn", "swell", "talent", "tea", "terrible", "thread", "torment",
"trickle", "usually", "vast", "violence", "weave", "acid", "agony",
"ashamed", "awe", "belly", "blend", "blush", "character", "cheat", "common",
"company", "coward", "creak", "danger", "deadly", "defense", "define",
"depend", "desperate", "destination", "dew", "duck", "dusty", "embarrass",
"engine", "example", "explore", "foe", "freely", "frustrate", "generation",
"glove", "guilty", "health", "hurry", "idiot", "impossible", "inhale",
"jaw",
"kingdom", "mention", "mist", "moan", "mumble", "mutter", "observe", "ode",
"pathetic", "pattern", "pie", "prefer", "puff", "rape", "rare", "revenge",
"rude", "scrape", "spiral", "squeeze", "strain", "sunset", "suspend",
"sympathy", "thigh", "throne", "total", "unseen", "weapon", "weary"
]
n = 1626
# Note about US patent no 5892470: Here each word does not represent a given digit.
# Instead, the digit represented by a word is variable; it depends on the previous word.
def mn_encode(message):
assert len(message) % 8 == 0
out = []
for i in range(len(message) // 8):
word = message[8 * i:8 * i + 8]
x = int(word, 16)
w1 = (x % n)
w2 = ((x // n) + w1) % n
w3 = ((x // n // n) + w2) % n
out += [words[w1], words[w2], words[w3]]
return out
def mn_decode(wlist):
out = ''
for i in range(len(wlist) // 3):
word1, word2, word3 = wlist[3 * i:3 * i + 3]
w1 = words.index(word1)
w2 = (words.index(word2)) % n
w3 = (words.index(word3)) % n
x = w1 + n * ((w2 - w1) % n) + n * n * ((w3 - w2) % n)
out += '%08x' % x
return out
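# Round-trip sketch (added for illustration; not part of the original module):
# each 8-hex-digit (32-bit) chunk of the seed maps to three words, and decoding
# reverses the encoding exactly because 1626**3 > 2**32.
if __name__ == '__main__':
    seed_hex = '0123456789abcdef'            # two 32-bit chunks -> six words
    mnemonic = mn_encode(seed_hex)
    assert mn_decode(mnemonic) == seed_hex   # encode and decode are inverses
    print(' '.join(mnemonic))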
|
undeath/joinmarket-clientserver
|
jmclient/jmclient/old_mnemonic.py
|
Python
|
gpl-3.0
| 17,930
|
[
"CRYSTAL",
"VisIt"
] |
8cae77962905fede3cd71b040bf8102a9f633e5eb44cdacf494b5f0d55f4a0ba
|
import sys
import os
import re
# pycalphad must be importable to build API documentation and for version retrieval
sys.path.insert(0, os.path.abspath('../pycalphad'))
from pycalphad import __version__ as pycalphad_version
pycalphad_version = re.sub(r'\.d[0-9]{8}', '', pycalphad_version) # remove .d<date>
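# Illustrative note (added): a development version string such as
# '0.8.4.d20200101' would become '0.8.4' after this substitution.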
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.2'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.extlinks',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
autosummary_generate = True
numpydoc_class_members_toctree = True
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pycalphad'
copyright = '2015, pycalphad Development Team'
author = 'pycalphad Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pycalphad_version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_autosummary']
extlinks = {'issue': ('https://github.com/pycalphad/pycalphad/issues/%s',
'issue ')}
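# Illustrative note (added; not part of the original configuration): with this
# mapping, reStructuredText such as :issue:`123` is expected to render as a
# hyperlink titled "issue 123" pointing at the pycalphad GitHub issue tracker.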
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# WARNING: the dark style is added on top of the light style.
# The dark style CSS may not override all the light style, leading to strange behavior.
pygments_style = 'default'
pygments_dark_style = "native"
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'furo'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'pycalphad-logo-withtext.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pycalphaddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pycalphad.tex', 'pycalphad Documentation',
'pycalphad Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pycalphad', 'pycalphad Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pycalphad', 'pycalphad Documentation',
author, 'pycalphad', 'Computational thermodynamics in Python',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
tkphd/pycalphad
|
docs/conf.py
|
Python
|
mit
| 9,435
|
[
"pycalphad"
] |
3ab81573d13520de2163246811dd9622fb0e15ba06f6ce6057757342fc69624e
|
# Copyright (c) 2014, James Hensman, Alan Saul
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ..core import Model
from paramz import ObsAr
from .. import likelihoods
class GPKroneckerGaussianRegression(Model):
"""
Kronecker GP regression
Take two kernels computed on separate spaces K1(X1), K2(X2), and a data
matrix Y which is of size (N1, N2).
The effective covariance is np.kron(K2, K1)
The effective data is vec(Y) = Y.flatten(order='F')
The noise must be iid Gaussian.
See [stegle_et_al_2011]_.
.. rubric:: References
.. [stegle_et_al_2011] Stegle, O.; Lippert, C.; Mooij, J.M.; Lawrence, N.D.; Borgwardt, K.: Efficient inference in matrix-variate Gaussian models with iid observation noise. In: Advances in Neural Information Processing Systems, 2011, Pages 630-638
"""
def __init__(self, X1, X2, Y, kern1, kern2, noise_var=1., name='KGPR'):
super(GPKroneckerGaussianRegression, self).__init__(name=name)
# accept the construction arguments
self.X1 = ObsAr(X1)
self.X2 = ObsAr(X2)
self.Y = Y
self.kern1, self.kern2 = kern1, kern2
self.link_parameter(self.kern1)
self.link_parameter(self.kern2)
self.likelihood = likelihoods.Gaussian()
self.likelihood.variance = noise_var
self.link_parameter(self.likelihood)
self.num_data1, self.input_dim1 = self.X1.shape
self.num_data2, self.input_dim2 = self.X2.shape
assert kern1.input_dim == self.input_dim1
assert kern2.input_dim == self.input_dim2
assert Y.shape == (self.num_data1, self.num_data2)
def log_likelihood(self):
return self._log_marginal_likelihood
def parameters_changed(self):
(N1, D1), (N2, D2) = self.X1.shape, self.X2.shape
K1, K2 = self.kern1.K(self.X1), self.kern2.K(self.X2)
# eigendecompositon
S1, U1 = np.linalg.eigh(K1)
S2, U2 = np.linalg.eigh(K2)
W = np.kron(S2, S1) + self.likelihood.variance
Y_ = U1.T.dot(self.Y).dot(U2)
# store these quantities: needed for prediction
Wi = 1./W
Ytilde = Y_.flatten(order='F')*Wi
self._log_marginal_likelihood = -0.5*self.num_data1*self.num_data2*np.log(2*np.pi)\
-0.5*np.sum(np.log(W))\
-0.5*np.dot(Y_.flatten(order='F'), Ytilde)
# gradients for data fit part
Yt_reshaped = Ytilde.reshape(N1, N2, order='F')
tmp = U1.dot(Yt_reshaped)
dL_dK1 = .5*(tmp*S2).dot(tmp.T)
tmp = U2.dot(Yt_reshaped.T)
dL_dK2 = .5*(tmp*S1).dot(tmp.T)
# gradients for logdet
Wi_reshaped = Wi.reshape(N1, N2, order='F')
tmp = np.dot(Wi_reshaped, S2)
dL_dK1 += -0.5*(U1*tmp).dot(U1.T)
tmp = np.dot(Wi_reshaped.T, S1)
dL_dK2 += -0.5*(U2*tmp).dot(U2.T)
self.kern1.update_gradients_full(dL_dK1, self.X1)
self.kern2.update_gradients_full(dL_dK2, self.X2)
# gradients for noise variance
dL_dsigma2 = -0.5*Wi.sum() + 0.5*np.sum(np.square(Ytilde))
self.likelihood.variance.gradient = dL_dsigma2
# store these quantities for prediction:
self.Wi, self.Ytilde, self.U1, self.U2 = Wi, Ytilde, U1, U2
def predict(self, X1new, X2new):
"""
Return the predictive mean and variance at a series of new points X1new, X2new
Only returns the diagonal of the predictive variance, for now.
:param X1new: The points at which to make a prediction
:type X1new: np.ndarray, Nnew x self.input_dim1
:param X2new: The points at which to make a prediction
:type X2new: np.ndarray, Nnew x self.input_dim2
"""
k1xf = self.kern1.K(X1new, self.X1)
k2xf = self.kern2.K(X2new, self.X2)
A = k1xf.dot(self.U1)
B = k2xf.dot(self.U2)
mu = A.dot(self.Ytilde.reshape(self.num_data1, self.num_data2, order='F')).dot(B.T).flatten(order='F')
k1xx = self.kern1.Kdiag(X1new)
k2xx = self.kern2.Kdiag(X2new)
BA = np.kron(B, A)
var = np.kron(k2xx, k1xx) - np.sum(BA**2*self.Wi, 1) + self.likelihood.variance
return mu[:, None], var[:, None]
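# Minimal usage sketch (added for illustration; not part of the original file).
# It assumes GPy.kern.RBF is importable with this signature and that the usual
# paramz optimize() interface is available on the model; run via
# `python -m GPy.models.gp_kronecker_gaussian_regression`.
if __name__ == '__main__':
    from GPy.kern import RBF
    X1 = np.linspace(0, 1, 30)[:, None]   # N1 x 1 inputs indexing the rows of Y
    X2 = np.linspace(0, 1, 20)[:, None]   # N2 x 1 inputs indexing the columns of Y
    Y = np.sin(X1) + np.cos(X2).T         # N1 x N2 observation matrix
    m = GPKroneckerGaussianRegression(X1, X2, Y, RBF(1), RBF(1))
    m.optimize(messages=False)            # gradients come from parameters_changed()
    mu, var = m.predict(X1, X2)           # vec(Y)-ordered predictive mean/variance
    print(mu.shape, var.shape)            # expected: (600, 1) (600, 1)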
|
SheffieldML/GPy
|
GPy/models/gp_kronecker_gaussian_regression.py
|
Python
|
bsd-3-clause
| 4,296
|
[
"Gaussian"
] |
d71f88181445c1fa386bea973979b4c75c7eb3041f22b10841c7dae4336db1c5
|
#!/usr/bin/env python
# coding: utf-8
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options] [path...]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
Perforce
CVS
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import BaseHTTPServer
import ConfigParser
import cookielib
import errno
import fnmatch
import getpass
import logging
import marshal
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
import webbrowser
from multiprocessing.pool import ThreadPool
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
try:
import keyring
except ImportError:
keyring = None
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
LOGGER = logging.getLogger('upload')
# The account type used for authentication.
# This line could be changed by the review server (see handler for
# upload.py).
AUTH_ACCOUNT_TYPE = "HOSTED"
# URL of the default review server. As for AUTH_ACCOUNT_TYPE, this line could be
# changed by the review server (see handler for upload.py).
DEFAULT_REVIEW_SERVER = "codereview.clockwork.net"
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
# Constants for version control names. Used by GuessVCSName.
VCS_GIT = "Git"
VCS_MERCURIAL = "Mercurial"
VCS_SUBVERSION = "Subversion"
VCS_PERFORCE = "Perforce"
VCS_CVS = "CVS"
VCS_UNKNOWN = "Unknown"
VCS = [
{
'name': VCS_MERCURIAL,
'aliases': ['hg', 'mercurial'],
}, {
'name': VCS_SUBVERSION,
'aliases': ['svn', 'subversion'],
}, {
'name': VCS_PERFORCE,
'aliases': ['p4', 'perforce'],
}, {
'name': VCS_GIT,
'aliases': ['git'],
}, {
'name': VCS_CVS,
'aliases': ['cvs'],
}]
VCS_SHORT_NAMES = [] # hg, svn, ...
VCS_ABBREVIATIONS = {} # alias: name, ...
for vcs in VCS:
VCS_SHORT_NAMES.append(min(vcs['aliases'], key=len))
VCS_ABBREVIATIONS.update((alias, vcs['name']) for alias in vcs['aliases'])
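# Illustrative note (added): after this loop VCS_SHORT_NAMES == ['hg', 'svn',
# 'p4', 'git', 'cvs'] and, for example, VCS_ABBREVIATIONS['mercurial'] == 'Mercurial'.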
# OAuth 2.0-Related Constants
LOCALHOST_IP = '127.0.0.1'
DEFAULT_OAUTH2_PORT = 8001
ACCESS_TOKEN_PARAM = 'access_token'
ERROR_PARAM = 'error'
OAUTH_DEFAULT_ERROR_MESSAGE = 'OAuth 2.0 error occurred.'
OAUTH_PATH = '/get-access-token'
OAUTH_PATH_PORT_TEMPLATE = OAUTH_PATH + '?port=%(port)d'
AUTH_HANDLER_RESPONSE = """\
<html>
<head>
<title>Authentication Status</title>
<script>
window.onload = function() {
window.close();
}
</script>
</head>
<body>
<p>The authentication flow has completed.</p>
</body>
</html>
"""
# Borrowed from google-api-python-client
OPEN_LOCAL_MESSAGE_TEMPLATE = """\
Your browser has been opened to visit:
%s
If your browser is on a different machine then exit and re-run
upload.py with the command-line parameter
--no_oauth2_webbrowser
"""
NO_OPEN_LOCAL_MESSAGE_TEMPLATE = """\
Go to the following link in your browser:
%s
and copy the access token.
"""
# The result of parsing Subversion's [auto-props] setting.
svn_auto_props_map = None
def GetEmail(prompt):
"""Prompts the user for their email address and returns it.
The last used email address is saved to a file and offered up as a suggestion
to the user. If the user presses enter without typing in anything the last
used email address is used. If the user enters a new address, it is saved
for next time we prompt.
"""
last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
last_email = ""
if os.path.exists(last_email_file_name):
try:
last_email_file = open(last_email_file_name, "r")
last_email = last_email_file.readline().strip("\n")
last_email_file.close()
prompt += " [%s]" % last_email
except IOError, e:
pass
email = raw_input(prompt + ": ").strip()
if email:
try:
last_email_file = open(last_email_file_name, "w")
last_email_file.write(email)
last_email_file.close()
except IOError, e:
pass
else:
email = last_email
return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
"""Print an error message to stderr and exit."""
print >>sys.stderr, msg
sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating with ClientLogin."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
self._reason = args["Error"]
self.info = args.get("Info", None)
@property
def reason(self):
# reason is a property on python 2.7 but a member variable on <=2.6.
# self.args is modified so it cannot be used as-is so save the value in
# self._reason.
return self._reason
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None,
extra_headers=None, save_cookies=False,
account_type=AUTH_ACCOUNT_TYPE):
"""Creates a new AbstractRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
account_type: Account type used for authentication. Defaults to
AUTH_ACCOUNT_TYPE.
"""
self.host = host
if (not self.host.startswith("http://") and
not self.host.startswith("https://")):
self.host = "http://" + self.host
self.host_override = host_override
self.auth_function = auth_function
self.authenticated = False
self.extra_headers = extra_headers or {}
self.save_cookies = save_cookies
self.account_type = account_type
self.opener = self._GetOpener()
if self.host_override:
LOGGER.info("Server: %s; Host: %s", self.host, self.host_override)
else:
LOGGER.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
LOGGER.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data, headers={"Accept": "text/plain"})
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = self.account_type
if self.host.endswith(".google.com"):
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("%s/_ah/login?%s" %
(self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response (or a 302) and
directs us to authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
print >>sys.stderr, ''
if e.reason == "BadAuthentication":
if e.info == "InvalidSecondFactor":
print >>sys.stderr, (
"Use an application-specific password instead "
"of your regular account password.\n"
"See http://www.google.com/"
"support/accounts/bin/answer.py?answer=185833")
else:
print >>sys.stderr, "Invalid username or password."
elif e.reason == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.\n"
"If you are using a Google Apps account the URL is:\n"
"https://www.google.com/a/yourdomain.com/UnlockCaptcha")
elif e.reason == "NotVerified":
print >>sys.stderr, "Account not verified."
elif e.reason == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
elif e.reason == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
elif e.reason == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
elif e.reason == "ServiceDisabled":
print >>sys.stderr, ("The user's access to the service has been "
"disabled.")
elif e.reason == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
else:
# Unknown error.
raise
print >>sys.stderr, ''
continue
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
extra_headers=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
extra_headers: Dict containing additional HTTP headers that should be
included in the request (string header names mapped to their values),
or None to not include any additional headers.
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated and self.auth_function:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
if extra_headers:
for header, value in extra_headers.items():
req.add_header(header, value)
try:
f = self.opener.open(req, timeout=70)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401 or e.code == 302:
if not self.auth_function:
raise
self._Authenticate()
elif e.code == 301:
# Handle permanent redirect manually.
url = e.info()["location"]
url_loc = urlparse.urlparse(url)
self.host = '%s://%s' % (url_loc[0], url_loc[1])
elif e.code >= 500:
# TODO: We should error out on a 500, but the server is too flaky
# for that at the moment.
StatusUpdate('Upload got a 500 response: %d' % e.code)
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests."""
def _Authenticate(self):
"""Save the cookie jar after authentication."""
if isinstance(self.auth_function, OAuth2Creds):
access_token = self.auth_function()
if access_token is not None:
self.extra_headers['Authorization'] = 'OAuth %s' % (access_token,)
self.authenticated = True
else:
super(HttpRpcServer, self)._Authenticate()
if self.save_cookies:
StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
self.cookie_jar.save()
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPSHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
if self.save_cookies:
self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
if os.path.exists(self.cookie_file):
try:
self.cookie_jar.load()
self.authenticated = True
StatusUpdate("Loaded authentication cookies from %s" %
self.cookie_file)
except (cookielib.LoadError, IOError):
# Failed to load cookies - just ignore them.
pass
else:
# Create an empty cookie file with mode 600
fd = os.open(self.cookie_file, os.O_CREAT, 0600)
os.close(fd)
# Always chmod the cookie file
os.chmod(self.cookie_file, 0600)
else:
# Don't save cookies across runs of upload.py.
self.cookie_jar = cookielib.CookieJar()
opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
return opener
class CondensedHelpFormatter(optparse.IndentedHelpFormatter):
"""Frees more horizontal space by removing indentation from group
options and collapsing the short and long argument forms, e.g.
'-o ARG, --opt=ARG' to '-o --opt ARG'."""
def format_heading(self, heading):
return "%s:\n" % heading
def format_option(self, option):
self.dedent()
res = optparse.HelpFormatter.format_option(self, option)
self.indent()
return res
def format_option_strings(self, option):
self.set_long_opt_delimiter(" ")
optstr = optparse.HelpFormatter.format_option_strings(self, option)
optlist = optstr.split(", ")
if len(optlist) > 1:
if option.takes_value():
# strip METAVAR from all but the last option
optlist = [x.split()[0] for x in optlist[:-1]] + optlist[-1:]
optstr = " ".join(optlist)
return optstr
parser = optparse.OptionParser(
usage=("%prog [options] [-- diff_options] [path...]\n"
"See also: http://code.google.com/p/rietveld/wiki/UploadPyUsage"),
add_help_option=False,
formatter=CondensedHelpFormatter()
)
parser.add_option("-h", "--help", action="store_true",
help="Show this help message and exit.")
parser.add_option("-y", "--assume_yes", action="store_true",
dest="assume_yes", default=False,
help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
dest="verbose", default=1,
help="Print info level logs.")
group.add_option("--noisy", action="store_const", const=3,
dest="verbose", help="Print all logs.")
group.add_option("--print_diffs", dest="print_diffs", action="store_true",
help="Print full diffs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
default=DEFAULT_REVIEW_SERVER,
metavar="SERVER",
help=("The server to upload to. The format is host[:port]. "
"Defaults to '%default'."))
group.add_option("-e", "--email", action="store", dest="email",
metavar="EMAIL", default=None,
help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
metavar="HOST", default=None,
help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
dest="save_cookies", default=True,
help="Do not save authentication cookies to local disk.")
group.add_option("--oauth2", action="store_true",
dest="use_oauth2", default=False,
help="Use OAuth 2.0 instead of a password.")
group.add_option("--oauth2_port", action="store", type="int",
dest="oauth2_port", default=DEFAULT_OAUTH2_PORT,
help=("Port to use to handle OAuth 2.0 redirect. Must be an "
"integer in the range 1024-49151, defaults to "
"'%default'."))
group.add_option("--no_oauth2_webbrowser", action="store_false",
dest="open_oauth2_local_webbrowser", default=True,
help="Don't open a browser window to get an access token.")
group.add_option("--account_type", action="store", dest="account_type",
metavar="TYPE", default=AUTH_ACCOUNT_TYPE,
choices=["GOOGLE", "HOSTED"],
help=("Override the default account type "
"(defaults to '%default', "
"valid choices are 'GOOGLE' and 'HOSTED')."))
group.add_option("-j", "--number-parallel-uploads",
dest="num_upload_threads", default=8,
help="Number of uploads to do in parallel.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-t", "--title", action="store", dest="title",
help="New issue subject or new patch set title")
group.add_option("-m", "--message", action="store", dest="message",
default=None,
help="New issue description or new patch set message")
group.add_option("-F", "--file", action="store", dest="file",
default=None, help="Read the message above from file.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
metavar="REVIEWERS", default=None,
help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
metavar="CC", default=None,
help="Add CC (comma separated email addresses).")
group.add_option("--private", action="store_true", dest="private",
default=False,
help="Make the issue restricted to reviewers and those CCed")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-i", "--issue", type="int", action="store",
metavar="ISSUE", default=None,
help="Issue number to which to add. Defaults to new issue.")
group.add_option("--base_url", action="store", dest="base_url", default=None,
help="Base URL path for files (listed as \"Base URL\" when "
"viewing issue). If omitted, will be guessed automatically "
"for SVN repos and left blank for others.")
group.add_option("--download_base", action="store_true",
dest="download_base", default=False,
help="Base files will be downloaded by the server "
"(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
metavar="REV", default=None,
help="Base revision/branch/tree to diff against. Use "
"rev1:rev2 range to review already committed changeset.")
group.add_option("--send_mail", action="store_true",
dest="send_mail", default=False,
help="Send notification email to reviewers.")
group.add_option("-p", "--send_patch", action="store_true",
dest="send_patch", default=False,
help="Same as --send_mail, but include diff as an "
"attachment, and prepend email subject with 'PATCH:'.")
group.add_option("--vcs", action="store", dest="vcs",
metavar="VCS", default=None,
help=("Explicitly specify version control system (%s)"
% ", ".join(VCS_SHORT_NAMES)))
group.add_option("--emulate_svn_auto_props", action="store_true",
dest="emulate_svn_auto_props", default=False,
help=("Emulate Subversion's auto properties feature."))
# Git-specific
group = parser.add_option_group("Git-specific options")
group.add_option("--git_similarity", action="store", dest="git_similarity",
metavar="SIM", type="int", default=50,
help=("Set the minimum similarity percentage for detecting "
"renames and copies. See `git diff -C`. (default 50)."))
group.add_option("--git_only_search_patch", action="store_false", default=True,
dest='git_find_copies_harder',
help="Removes --find-copies-harder when seaching for copies")
group.add_option("--git_no_find_copies", action="store_false", default=True,
dest="git_find_copies",
help=("Prevents git from looking for copies (default off)."))
# Perforce-specific
group = parser.add_option_group("Perforce-specific options "
"(overrides P4 environment variables)")
group.add_option("--p4_port", action="store", dest="p4_port",
metavar="P4_PORT", default=None,
help=("Perforce server and port (optional)"))
group.add_option("--p4_changelist", action="store", dest="p4_changelist",
metavar="P4_CHANGELIST", default=None,
help=("Perforce changelist id"))
group.add_option("--p4_client", action="store", dest="p4_client",
metavar="P4_CLIENT", default=None,
help=("Perforce client/workspace"))
group.add_option("--p4_user", action="store", dest="p4_user",
metavar="P4_USER", default=None,
help=("Perforce user"))
# SVN specific
group = parser.add_option_group("SVN-specific options")
group.add_option("--svn_explicit_branches", action="store_true", dest="svn_explicit_branches",
default=False,
help="Use explicit bases for svn diff source and target. Use "
"--svn-source=URL/path@rev1 --svn-target=URL/path@rev2")
group.add_option( "--svn_source", action="store", dest="svn_source",
default=None,
help="Source svn URL to diff against" )
group.add_option( "--svn_target", action="store", dest="svn_target",
default=None,
help="Target svn URL to diff against" )
# OAuth 2.0 Methods and Helpers
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
"""A server for redirects back to localhost from the associated server.
Waits for a single request and parses the query parameters for an access token
or an error and then stops serving.
"""
access_token = None
error = None
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler for redirects back to localhost from the associated server.
Waits for a single request and parses the query parameters into the server's
access_token or error and then stops serving.
"""
def SetResponseValue(self):
"""Stores the access token or error from the request on the server.
Will only do this if exactly one query parameter was passed in to the
request and that query parameter used 'access_token' or 'error' as the key.
"""
query_string = urlparse.urlparse(self.path).query
query_params = urlparse.parse_qs(query_string)
if len(query_params) == 1:
if query_params.has_key(ACCESS_TOKEN_PARAM):
access_token_list = query_params[ACCESS_TOKEN_PARAM]
if len(access_token_list) == 1:
self.server.access_token = access_token_list[0]
else:
error_list = query_params.get(ERROR_PARAM, [])
if len(error_list) == 1:
self.server.error = error_list[0]
def do_GET(self):
"""Handle a GET request.
Parses and saves the query parameters and prints a message that the server
has completed its lone task (handling a redirect).
Note that we can't detect if an error occurred.
"""
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.SetResponseValue()
self.wfile.write(AUTH_HANDLER_RESPONSE)
def log_message(self, format, *args):
"""Do not log messages to stdout while running as command line program."""
pass
def OpenOAuth2ConsentPage(server=DEFAULT_REVIEW_SERVER,
port=DEFAULT_OAUTH2_PORT):
"""Opens the OAuth 2.0 consent page or prints instructions how to.
Uses the webbrowser module to open the OAuth server side page in a browser.
Args:
server: String containing the review server URL. Defaults to
DEFAULT_REVIEW_SERVER.
port: Integer, the port where the localhost server receiving the redirect
is serving. Defaults to DEFAULT_OAUTH2_PORT.
Returns:
A boolean indicating whether the page opened successfully.
"""
path = OAUTH_PATH_PORT_TEMPLATE % {'port': port}
parsed_url = urlparse.urlparse(server)
scheme = parsed_url[0] or 'https'
if scheme != 'https':
ErrorExit('Using OAuth requires a review server with SSL enabled.')
# If no scheme was given on command line the server address ends up in
# parsed_url.path otherwise in netloc.
host = parsed_url[1] or parsed_url[2]
page = '%s://%s%s' % (scheme, host, path)
page_opened = webbrowser.open(page, new=1, autoraise=True)
if page_opened:
print OPEN_LOCAL_MESSAGE_TEMPLATE % (page,)
return page_opened
def WaitForAccessToken(port=DEFAULT_OAUTH2_PORT):
"""Spins up a simple HTTP Server to handle a single request.
Intended to handle a single redirect from the production server after the
user authenticated via OAuth 2.0 with the server.
Args:
port: Integer, the port where the localhost server receiving the redirect
is serving. Defaults to DEFAULT_OAUTH2_PORT.
Returns:
The access token passed to the localhost server, or None if no access token
was passed.
"""
httpd = ClientRedirectServer((LOCALHOST_IP, port), ClientRedirectHandler)
# Wait to serve just one request before deferring control back
# to the caller of WaitForAccessToken
httpd.handle_request()
if httpd.access_token is None:
ErrorExit(httpd.error or OAUTH_DEFAULT_ERROR_MESSAGE)
return httpd.access_token
def GetAccessToken(server=DEFAULT_REVIEW_SERVER, port=DEFAULT_OAUTH2_PORT,
open_local_webbrowser=True):
"""Gets an Access Token for the current user.
Args:
server: String containing the review server URL. Defaults to
DEFAULT_REVIEW_SERVER.
port: Integer, the port where the localhost server receiving the redirect
is serving. Defaults to DEFAULT_OAUTH2_PORT.
open_local_webbrowser: Boolean, defaults to True. If set, opens a page in
the user's browser.
Returns:
A string access token that was sent to the local server. If the serving page
via WaitForAccessToken does not receive an access token, this method
returns None.
"""
access_token = None
if open_local_webbrowser:
page_opened = OpenOAuth2ConsentPage(server=server, port=port)
if page_opened:
try:
access_token = WaitForAccessToken(port=port)
except socket.error, e:
print 'Can\'t start local webserver. Socket Error: %s\n' % (e.strerror,)
if access_token is None:
# TODO(dhermes): Offer to add to clipboard using xsel, xclip, pbcopy, etc.
page = 'https://%s%s' % (server, OAUTH_PATH)
print NO_OPEN_LOCAL_MESSAGE_TEMPLATE % (page,)
access_token = raw_input('Enter access token: ').strip()
return access_token
class KeyringCreds(object):
def __init__(self, server, host, email):
self.server = server
# Explicitly cast host to str to work around bug in old versions of Keyring
# (versions before 0.10). Even though newer versions of Keyring fix this,
# some modern linuxes (such as Ubuntu 12.04) still bundle a version with
# the bug.
self.host = str(host)
self.email = email
self.accounts_seen = set()
def GetUserCredentials(self):
"""Prompts the user for a username and password.
Only use keyring on the initial call. If the keyring contains the wrong
password, we want to give the user a chance to enter another one.
"""
# Create a local alias to the email variable to avoid Python's crazy
# scoping rules.
global keyring
email = self.email
if email is None:
email = GetEmail("Email (login for uploading to %s)" % self.server)
password = None
if keyring and not email in self.accounts_seen:
try:
password = keyring.get_password(self.host, email)
except:
# Sadly, we have to trap all errors here as
# gnomekeyring.IOError inherits from object. :/
print "Failed to get password from keyring"
keyring = None
if password is not None:
print "Using password from system keyring."
self.accounts_seen.add(email)
else:
password = getpass.getpass("Password for %s: " % email)
if keyring:
answer = raw_input("Store password in system keyring?(y/N) ").strip()
if answer == "y":
keyring.set_password(self.host, email, password)
self.accounts_seen.add(email)
return (email, password)
class OAuth2Creds(object):
"""Simple object to hold server and port to be passed to GetAccessToken."""
def __init__(self, server, port, open_local_webbrowser=True):
self.server = server
self.port = port
self.open_local_webbrowser = open_local_webbrowser
def __call__(self):
"""Uses stored server and port to retrieve OAuth 2.0 access token."""
return GetAccessToken(server=self.server, port=self.port,
open_local_webbrowser=self.open_local_webbrowser)
def GetRpcServer(server, email=None, host_override=None, save_cookies=True,
account_type=AUTH_ACCOUNT_TYPE, use_oauth2=False,
oauth2_port=DEFAULT_OAUTH2_PORT,
open_oauth2_local_webbrowser=True):
"""Returns an instance of an AbstractRpcServer.
Args:
server: String containing the review server URL.
email: String containing user's email address.
host_override: If not None, string containing an alternate hostname to use
in the host header.
save_cookies: Whether authentication cookies should be saved to disk.
account_type: Account type for authentication, either 'GOOGLE'
or 'HOSTED'. Defaults to AUTH_ACCOUNT_TYPE.
use_oauth2: Boolean indicating whether OAuth 2.0 should be used for
authentication.
oauth2_port: Integer, the port where the localhost server receiving the
redirect is serving. Defaults to DEFAULT_OAUTH2_PORT.
open_oauth2_local_webbrowser: Boolean, defaults to True. If True and using
OAuth, this opens a page in the user's browser to obtain a token.
Returns:
A new HttpRpcServer, on which RPC calls can be made.
"""
# If this is the dev_appserver, use fake authentication.
host = (host_override or server).lower()
if re.match(r'(http://)?localhost([:/]|$)', host):
if email is None:
email = "test@example.com"
LOGGER.info("Using debug user %s. Override with --email" % email)
server = HttpRpcServer(
server,
lambda: (email, "password"),
host_override=host_override,
extra_headers={"Cookie":
'dev_appserver_login="%s:False"' % email},
save_cookies=save_cookies,
account_type=account_type)
# Don't try to talk to ClientLogin.
server.authenticated = True
return server
positional_args = [server]
if use_oauth2:
positional_args.append(
OAuth2Creds(server, oauth2_port, open_oauth2_local_webbrowser))
else:
positional_args.append(KeyringCreds(server, host, email).GetUserCredentials)
return HttpRpcServer(*positional_args,
host_override=host_override,
save_cookies=save_cookies,
account_type=account_type)
def EncodeMultipartFormData(fields, files):
"""Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
"""
BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
CRLF = '\r\n'
lines = []
for (key, value) in fields:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
if isinstance(value, unicode):
value = value.encode('utf-8')
lines.append(value)
for (key, filename, value) in files:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename))
lines.append('Content-Type: %s' % GetContentType(filename))
lines.append('')
if isinstance(value, unicode):
value = value.encode('utf-8')
lines.append(value)
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
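# Usage sketch (added for illustration; not part of the original tool). The
# returned pair is meant to be passed to an RPC call; the names below are
# hypothetical stand-ins for values built elsewhere in this script:
#
#   ctype, body = EncodeMultipartFormData(
#       fields=[("subject", "Example change")],
#       files=[("data", "example.diff", diff_text)])
#   response = rpc_server.Send("/upload", payload=body, content_type=ctype)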
def GetContentType(filename):
"""Helper to guess the content-type from the filename."""
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCodeAndStderr(command, print_output=False,
universal_newlines=True,
env=os.environ):
"""Executes a command and returns the output from stdout, stderr and the return code.
Args:
command: Command to execute.
print_output: If True, the output is printed to stdout.
If False, the output is captured and returned but not printed.
universal_newlines: Use universal_newlines flag (default: True).
Returns:
Tuple (stdout, stderr, return code)
"""
LOGGER.info("Running %s", command)
env = env.copy()
env['LC_MESSAGES'] = 'C'
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=use_shell, universal_newlines=universal_newlines,
env=env)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
print line.strip("\n")
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
errout = p.stderr.read()
if print_output and errout:
print >>sys.stderr, errout
p.stdout.close()
p.stderr.close()
return output, errout, p.returncode
def RunShellWithReturnCode(command, print_output=False,
universal_newlines=True,
env=os.environ):
"""Executes a command and returns the output from stdout and the return code."""
out, err, retcode = RunShellWithReturnCodeAndStderr(command, print_output,
universal_newlines, env)
return out, retcode
def RunShell(command, silent_ok=False, universal_newlines=True,
print_output=False, env=os.environ):
data, retcode = RunShellWithReturnCode(command, print_output,
universal_newlines, env)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (command, data))
if not silent_ok and not data:
ErrorExit("No output from %s" % command)
return data
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
def __init__(self, options):
"""Constructor.
Args:
options: Command line options.
"""
self.options = options
def GetGUID(self):
"""Return string to distinguish the repository from others, for example to
query all opened review issues for it"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def PostProcessDiff(self, diff):
"""Return the diff with any special post processing this VCS needs, e.g.
to include an svn-style "Index:"."""
return diff
def GenerateDiff(self, args):
"""Return the current diff as a string.
Args:
args: Extra arguments to pass to the diff command.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
prompt = "Are you sure to continue?(y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = filename.strip().replace('\\', '/')
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if is_binary:
return "Not uploading binary files."
if len(content) > MAX_UPLOAD_SIZE:
result = ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
elif options.verbose:
result = "Uploading %s file for %s" % (type, filename)
checksum = md5(content).hexdigest()
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields,
[("data", filename, content)])
try:
response_body = rpc_server.Send(url, body, content_type=ctype)
except urllib2.HTTPError, e:
response_body = ("Failed to upload file for %s. Got %d status code." %
(filename, e.code))
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
return result
patches = dict()
[patches.setdefault(v, k) for k, v in patch_list]
threads = []
thread_pool = ThreadPool(options.num_upload_threads)
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content != None:
t = thread_pool.apply_async(UploadFile, args=(filename,
file_id, base_content, is_binary, status, True))
threads.append(t)
if new_content != None:
t = thread_pool.apply_async(UploadFile, args=(filename,
file_id, new_content, is_binary, status, False))
threads.append(t)
for t in threads:
print t.get(timeout=60)
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/") and not mimetype.startswith("image/svg")
def IsBinaryData(self, data):
"""Returns true if data contains a null byte."""
    # Derived from Mercurial's binary-detection heuristic, see
# http://selenic.com/hg/file/848a6658069e/mercurial/util.py#l229
return bool(data and "\0" in data)
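  # Illustrative results (not part of the original script):
  #   IsBinaryData("plain text\n")      -> False
  #   IsBinaryData("PK\x03\x04\x00...") -> True   (null byte present)
  #   IsBinaryData("")                  -> False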
class SubversionVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Subversion."""
def __init__(self, options):
super(SubversionVCS, self).__init__(options)
LOGGER.info("setting up subversion.")
if self.options.svn_explicit_branches:
LOGGER.info("svn explicit branches.")
if self.options.svn_source:
match = re.match(r"([^@]+)(@(.+))?", self.options.svn_source)
self.svn_source_url = match.group(1)
self.rev_start = match.group(3) or "HEAD"
if self.options.svn_target:
match = re.match(r"([^@]+)(@(.+))?", self.options.svn_target)
self.svn_target_url = match.group(1)
self.rev_end = match.group(3) or "HEAD"
else:
if self.options.revision:
match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
if not match:
ErrorExit("Invalid Subversion revision %s." % self.options.revision)
self.rev_start = match.group(1)
self.rev_end = match.group(3)
else:
self.rev_start = self.rev_end = None
# Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
self.svnls_cache = {}
# Base URL is required to fetch files deleted in an older revision.
# Result is cached to not guess it over and over again in GetBaseFile().
required = self.options.download_base or self.options.revision is not None
LOGGER.info(required)
if self.options.svn_explicit_branches:
self.svn_base = self.svn_source_url
else:
self.svn_base = self._GuessBase(required)
def GetGUID(self):
return self._GetInfo("Repository UUID")
def GuessBase(self, required):
"""Wrapper for _GuessBase."""
if self.options.svn_explicit_branches:
return self.svn_source_url
else:
return self.svn_base
def _GuessBase(self, required):
"""Returns base URL for current diff.
Args:
required: If true, exits if the url can't be guessed, otherwise None is
returned.
"""
url = self._GetInfo("URL")
if url:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
guess = ""
# TODO(anatoli) - repository specific hacks should be handled by server
if netloc == "svn.python.org" and scheme == "svn+ssh":
path = "projects" + path
scheme = "http"
guess = "Python "
elif netloc.endswith(".googlecode.com"):
scheme = "http"
guess = "Google Code "
path = path + "/"
base = urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
LOGGER.info("Guessed %sbase = %s", guess, base)
return base
if required:
ErrorExit("Can't find URL in output from svn info")
return None
def _GetInfo(self, key):
"""Parses 'svn info' for current dir. Returns value for key or None"""
if self.options.svn_explicit_branches:
cmd = ["svn", "info", self.svn_source_url ]
else:
cmd = ["svn", "info"]
for line in RunShell(cmd).splitlines():
if line.startswith(key + ": "):
return line.split(":", 1)[1].strip()
def _EscapeFilename(self, filename):
"""Escapes filename for SVN commands."""
if "@" in filename and not filename.endswith("@"):
filename = "%s@" % filename
return filename
def GenerateDiff(self, args):
cmd = ["svn", "diff"]
if self.options.svn_explicit_branches:
cmd += [self.svn_source_url + "@" + self.rev_start]
cmd += [self.svn_target_url + "@" + self.rev_end]
else:
if self.options.revision:
cmd += ["-r", self.options.revision]
cmd.extend(args)
data = RunShell(cmd)
count = 0
for line in data.splitlines():
if line.startswith("Index:") or line.startswith("Property changes on:"):
count += 1
LOGGER.info(line)
if not count:
ErrorExit("No valid patches found in output from svn diff")
return data
def _CollapseKeywords(self, content, keyword_str):
"""Collapses SVN keywords."""
# svn cat translates keywords but svn diff doesn't. As a result of this
# behavior patching.PatchChunks() fails with a chunk mismatch error.
# This part was originally written by the Review Board development team
# who had the same problem (http://reviews.review-board.org/r/276/).
# Mapping of keywords to known aliases
svn_keywords = {
# Standard keywords
'Date': ['Date', 'LastChangedDate'],
'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
'Author': ['Author', 'LastChangedBy'],
'HeadURL': ['HeadURL', 'URL'],
'Id': ['Id'],
# Aliases
'LastChangedDate': ['LastChangedDate', 'Date'],
'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
'LastChangedBy': ['LastChangedBy', 'Author'],
'URL': ['URL', 'HeadURL'],
}
def repl(m):
if m.group(2):
return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
return "$%s$" % m.group(1)
keywords = [keyword
for name in keyword_str.split(" ")
for keyword in svn_keywords.get(name, [])]
return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
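  # Illustrative (not part of the original script): with keyword_str "Revision",
  # a line containing "$Revision: 1234 $" is collapsed to "$Revision$", so the
  # base file matches what "svn diff" produces.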
def GetUnknownFiles(self):
status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
unknown_files = []
for line in status.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
def ReadFile(self, filename):
"""Returns the contents of a file."""
file = open(filename, 'rb')
result = ""
try:
result = file.read()
finally:
file.close()
return result
def GetStatus(self, filename):
"""Returns the status of a file."""
if not self.options.revision and not self.options.svn_explicit_branches:
status = RunShell(["svn", "status", "--ignore-externals",
self._EscapeFilename(filename)])
if not status:
ErrorExit("svn status returned no output for %s" % filename)
status_lines = status.splitlines()
# If file is in a cl, the output will begin with
# "\n--- Changelist 'cl_name':\n". See
# http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
if (len(status_lines) == 3 and
not status_lines[0] and
status_lines[1].startswith("--- Changelist")):
status = status_lines[2]
else:
status = status_lines[0]
# If we have a revision to diff against we need to run "svn list"
# for the old and the new revision and compare the results to get
# the correct status for a file.
else:
dirname, relfilename = os.path.split(filename)
if dirname not in self.svnls_cache:
if self.options.svn_explicit_branches:
separator = "" if dirname.startswith( "/" ) else "/"
cmd = [
"svn",
"list",
self.svn_source_url + separator + dirname + "@" + self.rev_start or "."
]
else:
cmd = [
"svn",
"list",
"-r",
self.rev_start,
self._EscapeFilename(dirname) or "."
]
out, err, returncode = RunShellWithReturnCodeAndStderr(cmd)
if returncode:
if self.options.svn_explicit_branches:
# a new directory in the target branch makes a file appear to be present in the target branch only
status = "A "
return status
          # Directory might not yet exist at start revision
# svn: Unable to find repository location for 'abc' in revision nnn
if re.match('^svn: Unable to find repository location for .+ in revision \d+', err):
old_files = ()
else:
ErrorExit("Failed to get status for %s:\n%s" % (filename, err))
else:
old_files = out.splitlines()
args = ["svn", "list"]
if self.rev_end:
args += ["-r", self.rev_end]
if self.options.svn_explicit_branches:
cmd = args + [self.svn_source_url + separator + dirname + "@" + self.rev_start or "."]
else:
cmd = args + [self._EscapeFilename(dirname) or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to run command %s" % cmd)
self.svnls_cache[dirname] = (old_files, out.splitlines())
old_files, new_files = self.svnls_cache[dirname]
if relfilename in old_files and relfilename not in new_files:
status = "D "
elif relfilename in old_files and relfilename in new_files:
status = "M "
else:
status = "A "
return status
def GetBaseFile(self, filename):
status = self.GetStatus(filename)
base_content = None
new_content = None
# If a file is copied its status will be "A +", which signifies
# "addition-with-history". See "svn st" for more information. We need to
# upload the original file or else diff parsing will fail if the file was
# edited.
if status[0] == "A" and status[3] != "+":
# We'll need to upload the new content if we're adding a binary file
# since diff's output won't contain it.
if self.options.svn_explicit_branches:
mimetype = RunShell(
[
"svn",
"propget",
"svn:mime-type",
self.svn_target_url + "/" + filename + "@" + self.rev_end
],
silent_ok=True
)
else:
mimetype = RunShell(["svn", "propget", "svn:mime-type",
self._EscapeFilename(filename)], silent_ok=True)
base_content = ""
is_binary = bool(mimetype) and not mimetype.startswith("text/")
if is_binary:
try:
new_content = self.ReadFile(filename)
except IOError:
# Ignore missing local image file (this can happen if the source rev
# is not HEAD)
pass
elif (status[0] in ("M", "D", "R") or
(status[0] == "A" and status[3] == "+") or # Copied file.
(status[0] == " " and status[1] == "M")): # Property change.
args = []
if self.options.svn_explicit_branches:
url = "%s/%s@%s" % (self.svn_source_url, filename, self.rev_start)
elif self.options.revision:
        # filename must not be escaped. We already append an "@" here.
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
# Don't change filename, it's needed later.
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
mimetype, returncode = RunShellWithReturnCode(cmd)
if returncode:
# File does not exist in the requested revision.
# Reset mimetype, it contains an error message.
mimetype = ""
else:
mimetype = mimetype.strip()
get_base = False
# this test for binary is exactly the test prescribed by the
# official SVN docs at
# http://subversion.apache.org/faq.html#binary-files
is_binary = (bool(mimetype) and
not mimetype.startswith("text/") and
mimetype not in ("image/x-xbitmap", "image/x-xpixmap"))
if status[0] == " ":
# Empty base content just to force an upload.
base_content = ""
elif is_binary:
get_base = True
if status[0] == "M":
if not self.rev_end:
new_content = self.ReadFile(filename)
else:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
new_content = RunShell(["svn", "cat", url],
universal_newlines=True, silent_ok=True)
else:
get_base = True
if get_base:
if is_binary:
universal_newlines = False
else:
universal_newlines = True
if self.options.svn_explicit_branches:
url = "%s/%s@%s" % (self.svn_source_url, filename, self.rev_start)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
elif self.rev_start:
# "svn cat -r REV delete_file.txt" doesn't work. cat requires
# the full URL with "@REV" appended instead of using "-r" option.
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
else:
base_content, ret_code = RunShellWithReturnCode(
["svn", "cat", self._EscapeFilename(filename)],
universal_newlines=universal_newlines)
if ret_code and status[0] == "R":
# It's a replaced file without local history (see issue208).
# The base file needs to be fetched from the server.
url = "%s/%s" % (self.svn_base, filename)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
elif ret_code:
ErrorExit("Got error status from 'svn cat %s'" % filename)
if not is_binary:
args = []
if self.rev_start:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:keywords", url]
keywords, returncode = RunShellWithReturnCode(cmd)
if keywords and not returncode:
base_content = self._CollapseKeywords(base_content, keywords)
else:
StatusUpdate("svn status returned unexpected output: %s" % status)
sys.exit(1)
return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Git."""
def __init__(self, options):
super(GitVCS, self).__init__(options)
# Map of filename -> (hash before, hash after) of base file.
# Hashes for "no such file" are represented as None.
self.hashes = {}
# Map of new filename -> old filename for renames.
self.renames = {}
def GetGUID(self):
revlist = RunShell("git rev-list --parents HEAD".split()).splitlines()
# M-A: Return the 1st root hash, there could be multiple when a
# subtree is merged. In that case, more analysis would need to
# be done to figure out which HEAD is the 'most representative'.
for r in revlist:
if ' ' not in r:
return r
def PostProcessDiff(self, gitdiff):
"""Converts the diff output to include an svn-style "Index:" line as well
as record the hashes of the files, so we can upload them along with our
diff."""
    # Special hash used by git to indicate "no such content".
NULL_HASH = "0"*40
def IsFileNew(filename):
return filename in self.hashes and self.hashes[filename][0] is None
def AddSubversionPropertyChange(filename):
"""Add svn's property change information into the patch if given file is
new file.
We use Subversion's auto-props setting to retrieve its property.
See http://svnbook.red-bean.com/en/1.1/ch07.html#svn-ch-7-sect-1.3.2 for
Subversion's [auto-props] setting.
"""
if self.options.emulate_svn_auto_props and IsFileNew(filename):
svnprops = GetSubversionPropertyChanges(filename)
if svnprops:
svndiff.append("\n" + svnprops + "\n")
svndiff = []
filecount = 0
filename = None
for line in gitdiff.splitlines():
match = re.match(r"diff --git a/(.*) b/(.*)$", line)
if match:
# Add auto property here for previously seen file.
if filename is not None:
AddSubversionPropertyChange(filename)
filecount += 1
# Intentionally use the "after" filename so we can show renames.
filename = match.group(2)
svndiff.append("Index: %s\n" % filename)
if match.group(1) != match.group(2):
self.renames[match.group(2)] = match.group(1)
else:
# The "index" line in a git diff looks like this (long hashes elided):
# index 82c0d44..b2cee3f 100755
# We want to save the left hash, as that identifies the base file.
match = re.match(r"index (\w+)\.\.(\w+)", line)
if match:
before, after = (match.group(1), match.group(2))
if before == NULL_HASH:
before = None
if after == NULL_HASH:
after = None
self.hashes[filename] = (before, after)
svndiff.append(line + "\n")
if not filecount:
ErrorExit("No valid patches found in output from git diff")
# Add auto property for the last seen file.
assert filename is not None
AddSubversionPropertyChange(filename)
return "".join(svndiff)
def GenerateDiff(self, extra_args):
extra_args = extra_args[:]
if self.options.revision:
if ":" in self.options.revision:
extra_args = self.options.revision.split(":", 1) + extra_args
else:
extra_args = [self.options.revision] + extra_args
# --no-ext-diff is broken in some versions of Git, so try to work around
# this by overriding the environment (but there is still a problem if the
# git config key "diff.external" is used).
env = os.environ.copy()
if "GIT_EXTERNAL_DIFF" in env:
del env["GIT_EXTERNAL_DIFF"]
# -M/-C will not print the diff for the deleted file when a file is renamed.
# This is confusing because the original file will not be shown on the
# review when a file is renamed. So, get a diff with ONLY deletes, then
# append a diff (with rename detection), without deletes.
cmd = [
"git", "diff", "--no-color", "--no-ext-diff", "--full-index",
"--ignore-submodules", "--src-prefix=a/", "--dst-prefix=b/",
]
diff = RunShell(
cmd + ["--no-renames", "--diff-filter=D"] + extra_args,
env=env, silent_ok=True)
assert 0 <= self.options.git_similarity <= 100
if self.options.git_find_copies:
similarity_options = ["-l100000", "-C%d%%" % self.options.git_similarity]
if self.options.git_find_copies_harder:
similarity_options.append("--find-copies-harder")
else:
similarity_options = ["-M%d%%" % self.options.git_similarity ]
diff += RunShell(
cmd + ["--diff-filter=AMCRT"] + similarity_options + extra_args,
env=env, silent_ok=True)
    # The CL may consist entirely of deletions (or contain none at all), so
    # either command may legitimately produce no output. Accept silent diffs
    # from both and check for a completely empty combined diff manually.
if not diff:
ErrorExit("No output from %s" % (cmd + extra_args))
return diff
def GetUnknownFiles(self):
status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
silent_ok=True)
return status.splitlines()
def GetFileContent(self, file_hash):
"""Returns the content of a file identified by its git hash."""
data, retcode = RunShellWithReturnCode(["git", "show", file_hash],
universal_newlines=False)
if retcode:
ErrorExit("Got error status from 'git show %s'" % file_hash)
return data
def GetBaseFile(self, filename):
hash_before, hash_after = self.hashes.get(filename, (None,None))
base_content = None
new_content = None
status = None
if filename in self.renames:
status = "A +" # Match svn attribute name for renames.
if filename not in self.hashes:
# If a rename doesn't change the content, we never get a hash.
base_content = RunShell(
["git", "show", "HEAD:" + filename], silent_ok=True,
universal_newlines=False)
elif not hash_before:
status = "A"
base_content = ""
elif not hash_after:
status = "D"
else:
status = "M"
# Grab the before/after content if we need it.
# Grab the base content if we don't have it already.
if base_content is None and hash_before:
base_content = self.GetFileContent(hash_before)
is_binary = self.IsImage(filename)
if base_content:
is_binary = is_binary or self.IsBinaryData(base_content)
# Only include the "after" file if it's an image; otherwise it
# it is reconstructed from the diff.
if hash_after:
new_content = self.GetFileContent(hash_after)
is_binary = is_binary or self.IsBinaryData(new_content)
if not is_binary:
new_content = None
return (base_content, new_content, is_binary, status)
class CVSVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for CVS."""
def __init__(self, options):
super(CVSVCS, self).__init__(options)
def GetGUID(self):
"""For now we don't know how to get repository ID for CVS"""
return
def GetOriginalContent_(self, filename):
RunShell(["cvs", "up", filename], silent_ok=True)
# TODO need detect file content encoding
content = open(filename).read()
return content.replace("\r\n", "\n")
def GetBaseFile(self, filename):
base_content = None
new_content = None
status = "A"
output, retcode = RunShellWithReturnCode(["cvs", "status", filename])
if retcode:
ErrorExit("Got error status from 'cvs status %s'" % filename)
if output.find("Status: Locally Modified") != -1:
status = "M"
temp_filename = "%s.tmp123" % filename
os.rename(filename, temp_filename)
base_content = self.GetOriginalContent_(filename)
os.rename(temp_filename, filename)
elif output.find("Status: Locally Added"):
status = "A"
base_content = ""
elif output.find("Status: Needs Checkout"):
status = "D"
base_content = self.GetOriginalContent_(filename)
return (base_content, new_content, self.IsBinaryData(base_content), status)
def GenerateDiff(self, extra_args):
cmd = ["cvs", "diff", "-u", "-N"]
if self.options.revision:
cmd += ["-r", self.options.revision]
cmd.extend(extra_args)
data, retcode = RunShellWithReturnCode(cmd)
count = 0
if retcode in [0, 1]:
for line in data.splitlines():
if line.startswith("Index:"):
count += 1
LOGGER.info(line)
if not count:
ErrorExit("No valid patches found in output from cvs diff")
return data
def GetUnknownFiles(self):
data, retcode = RunShellWithReturnCode(["cvs", "diff"])
if retcode not in [0, 1]:
ErrorExit("Got error status from 'cvs diff':\n%s" % (data,))
unknown_files = []
for line in data.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
class MercurialVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Mercurial."""
def __init__(self, options, repo_dir):
super(MercurialVCS, self).__init__(options)
# Absolute path to repository (we can be in a subdir)
self.repo_dir = os.path.normpath(repo_dir)
# Compute the subdir
cwd = os.path.normpath(os.getcwd())
assert cwd.startswith(self.repo_dir)
self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
if self.options.revision:
self.base_rev = self.options.revision
else:
self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
def GetGUID(self):
# See chapter "Uniquely identifying a repository"
# http://hgbook.red-bean.com/read/customizing-the-output-of-mercurial.html
info = RunShell("hg log -r0 --template {node}".split())
return info.strip()
def _GetRelPath(self, filename):
"""Get relative path of a file according to the current directory,
given its logical path in the repo."""
absname = os.path.join(self.repo_dir, filename)
return os.path.relpath(absname)
def GenerateDiff(self, extra_args):
cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
data = RunShell(cmd, silent_ok=True)
svndiff = []
filecount = 0
for line in data.splitlines():
m = re.match("diff --git a/(\S+) b/(\S+)", line)
if m:
        # Modify the line to make it look as if it comes from svn diff.
# With this modification no changes on the server side are required
# to make upload.py work with Mercurial repos.
# NOTE: for proper handling of moved/copied files, we have to use
# the second filename.
filename = m.group(2)
svndiff.append("Index: %s" % filename)
svndiff.append("=" * 67)
filecount += 1
LOGGER.info(line)
else:
svndiff.append(line)
if not filecount:
ErrorExit("No valid patches found in output from hg diff")
return "\n".join(svndiff) + "\n"
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
args = []
status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
silent_ok=True)
unknown_files = []
for line in status.splitlines():
st, fn = line.split(" ", 1)
if st == "?":
unknown_files.append(fn)
return unknown_files
def GetBaseFile(self, filename):
# "hg status" and "hg cat" both take a path relative to the current subdir,
# but "hg diff" has given us the path relative to the repo root.
base_content = ""
new_content = None
is_binary = False
oldrelpath = relpath = self._GetRelPath(filename)
# "hg status -C" returns two lines for moved/copied files, one otherwise
out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
out = out.splitlines()
# HACK: strip error message about missing file/directory if it isn't in
# the working copy
if out[0].startswith('%s: ' % relpath):
out = out[1:]
status, _ = out[0].split(' ', 1)
if len(out) > 1 and status == "A":
# Moved/copied => considered as modified, use old filename to
# retrieve base contents
oldrelpath = out[1].strip()
status = "M"
if ":" in self.base_rev:
base_rev = self.base_rev.split(":", 1)[0]
else:
base_rev = self.base_rev
if status != "A":
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
silent_ok=True)
is_binary = self.IsBinaryData(base_content)
if status != "R":
new_content = open(relpath, "rb").read()
is_binary = is_binary or self.IsBinaryData(new_content)
if is_binary and base_content:
# Fetch again without converting newlines
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
silent_ok=True, universal_newlines=False)
if not is_binary:
new_content = None
return base_content, new_content, is_binary, status
class PerforceVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Perforce."""
def __init__(self, options):
def ConfirmLogin():
# Make sure we have a valid perforce session
while True:
data, retcode = self.RunPerforceCommandWithReturnCode(
["login", "-s"], marshal_output=True)
if not data:
ErrorExit("Error checking perforce login")
if not retcode and (not "code" in data or data["code"] != "error"):
break
print "Enter perforce password: "
self.RunPerforceCommandWithReturnCode(["login"])
super(PerforceVCS, self).__init__(options)
self.p4_changelist = options.p4_changelist
if not self.p4_changelist:
ErrorExit("A changelist id is required")
if (options.revision):
ErrorExit("--rev is not supported for perforce")
self.p4_port = options.p4_port
self.p4_client = options.p4_client
self.p4_user = options.p4_user
ConfirmLogin()
if not options.title:
description = self.RunPerforceCommand(["describe", self.p4_changelist],
marshal_output=True)
if description and "desc" in description:
# Rietveld doesn't support multi-line descriptions
raw_title = description["desc"].strip()
lines = raw_title.splitlines()
if len(lines):
options.title = lines[0]
def GetGUID(self):
"""For now we don't know how to get repository ID for Perforce"""
return
def RunPerforceCommandWithReturnCode(self, extra_args, marshal_output=False,
universal_newlines=True):
args = ["p4"]
if marshal_output:
# -G makes perforce format its output as marshalled python objects
args.extend(["-G"])
if self.p4_port:
args.extend(["-p", self.p4_port])
if self.p4_client:
args.extend(["-c", self.p4_client])
if self.p4_user:
args.extend(["-u", self.p4_user])
args.extend(extra_args)
data, retcode = RunShellWithReturnCode(
args, print_output=False, universal_newlines=universal_newlines)
if marshal_output and data:
data = marshal.loads(data)
return data, retcode
def RunPerforceCommand(self, extra_args, marshal_output=False,
universal_newlines=True):
# This might be a good place to cache call results, since things like
# describe or fstat might get called repeatedly.
data, retcode = self.RunPerforceCommandWithReturnCode(
extra_args, marshal_output, universal_newlines)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (extra_args, data))
return data
def GetFileProperties(self, property_key_prefix = "", command = "describe"):
description = self.RunPerforceCommand(["describe", self.p4_changelist],
marshal_output=True)
changed_files = {}
file_index = 0
# Try depotFile0, depotFile1, ... until we don't find a match
while True:
file_key = "depotFile%d" % file_index
if file_key in description:
filename = description[file_key]
change_type = description[property_key_prefix + str(file_index)]
changed_files[filename] = change_type
file_index += 1
else:
break
return changed_files
def GetChangedFiles(self):
return self.GetFileProperties("action")
def GetUnknownFiles(self):
# Perforce doesn't detect new files, they have to be explicitly added
return []
def IsBaseBinary(self, filename):
base_filename = self.GetBaseFilename(filename)
return self.IsBinaryHelper(base_filename, "files")
def IsPendingBinary(self, filename):
return self.IsBinaryHelper(filename, "describe")
def IsBinaryHelper(self, filename, command):
file_types = self.GetFileProperties("type", command)
if not filename in file_types:
ErrorExit("Trying to check binary status of unknown file %s." % filename)
# This treats symlinks, macintosh resource files, temporary objects, and
# unicode as binary. See the Perforce docs for more details:
# http://www.perforce.com/perforce/doc.current/manuals/cmdref/o.ftypes.html
return not file_types[filename].endswith("text")
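  # Illustrative (not part of the original script): Perforce types "text" and
  # "ktext" are treated as text here, while "binary", "symlink" and "unicode"
  # are treated as binary.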
def GetFileContent(self, filename, revision, is_binary):
file_arg = filename
if revision:
file_arg += "#" + revision
# -q suppresses the initial line that displays the filename and revision
return self.RunPerforceCommand(["print", "-q", file_arg],
universal_newlines=not is_binary)
def GetBaseFilename(self, filename):
actionsWithDifferentBases = [
"move/add", # p4 move
"branch", # p4 integrate (to a new file), similar to hg "add"
"add", # p4 integrate (to a new file), after modifying the new file
]
# We only see a different base for "add" if this is a downgraded branch
# after a file was branched (integrated), then edited.
if self.GetAction(filename) in actionsWithDifferentBases:
# -Or shows information about pending integrations/moves
fstat_result = self.RunPerforceCommand(["fstat", "-Or", filename],
marshal_output=True)
baseFileKey = "resolveFromFile0" # I think it's safe to use only file0
if baseFileKey in fstat_result:
return fstat_result[baseFileKey]
return filename
def GetBaseRevision(self, filename):
base_filename = self.GetBaseFilename(filename)
have_result = self.RunPerforceCommand(["have", base_filename],
marshal_output=True)
if "haveRev" in have_result:
return have_result["haveRev"]
def GetLocalFilename(self, filename):
where = self.RunPerforceCommand(["where", filename], marshal_output=True)
if "path" in where:
return where["path"]
def GenerateDiff(self, args):
class DiffData:
def __init__(self, perforceVCS, filename, action):
self.perforceVCS = perforceVCS
self.filename = filename
self.action = action
self.base_filename = perforceVCS.GetBaseFilename(filename)
self.file_body = None
self.base_rev = None
self.prefix = None
self.working_copy = True
self.change_summary = None
def GenerateDiffHeader(diffData):
header = []
header.append("Index: %s" % diffData.filename)
header.append("=" * 67)
if diffData.base_filename != diffData.filename:
if diffData.action.startswith("move"):
verb = "rename"
else:
verb = "copy"
header.append("%s from %s" % (verb, diffData.base_filename))
header.append("%s to %s" % (verb, diffData.filename))
suffix = "\t(revision %s)" % diffData.base_rev
header.append("--- " + diffData.base_filename + suffix)
if diffData.working_copy:
suffix = "\t(working copy)"
header.append("+++ " + diffData.filename + suffix)
if diffData.change_summary:
header.append(diffData.change_summary)
return header
def GenerateMergeDiff(diffData, args):
# -du generates a unified diff, which is nearly svn format
diffData.file_body = self.RunPerforceCommand(
["diff", "-du", diffData.filename] + args)
diffData.base_rev = self.GetBaseRevision(diffData.filename)
diffData.prefix = ""
# We have to replace p4's file status output (the lines starting
# with +++ or ---) to match svn's diff format
lines = diffData.file_body.splitlines()
first_good_line = 0
while (first_good_line < len(lines) and
not lines[first_good_line].startswith("@@")):
first_good_line += 1
diffData.file_body = "\n".join(lines[first_good_line:])
return diffData
def GenerateAddDiff(diffData):
fstat = self.RunPerforceCommand(["fstat", diffData.filename],
marshal_output=True)
if "headRev" in fstat:
diffData.base_rev = fstat["headRev"] # Re-adding a deleted file
else:
diffData.base_rev = "0" # Brand new file
diffData.working_copy = False
rel_path = self.GetLocalFilename(diffData.filename)
diffData.file_body = open(rel_path, 'r').read()
# Replicate svn's list of changed lines
line_count = len(diffData.file_body.splitlines())
diffData.change_summary = "@@ -0,0 +1"
if line_count > 1:
diffData.change_summary += ",%d" % line_count
diffData.change_summary += " @@"
diffData.prefix = "+"
return diffData
def GenerateDeleteDiff(diffData):
diffData.base_rev = self.GetBaseRevision(diffData.filename)
is_base_binary = self.IsBaseBinary(diffData.filename)
# For deletes, base_filename == filename
diffData.file_body = self.GetFileContent(diffData.base_filename,
None,
is_base_binary)
# Replicate svn's list of changed lines
line_count = len(diffData.file_body.splitlines())
diffData.change_summary = "@@ -1"
if line_count > 1:
diffData.change_summary += ",%d" % line_count
diffData.change_summary += " +0,0 @@"
diffData.prefix = "-"
return diffData
changed_files = self.GetChangedFiles()
svndiff = []
filecount = 0
for (filename, action) in changed_files.items():
svn_status = self.PerforceActionToSvnStatus(action)
if svn_status == "SKIP":
continue
diffData = DiffData(self, filename, action)
# Is it possible to diff a branched file? Stackoverflow says no:
# http://stackoverflow.com/questions/1771314/in-perforce-command-line-how-to-diff-a-file-reopened-for-add
if svn_status == "M":
diffData = GenerateMergeDiff(diffData, args)
elif svn_status == "A":
diffData = GenerateAddDiff(diffData)
elif svn_status == "D":
diffData = GenerateDeleteDiff(diffData)
else:
ErrorExit("Unknown file action %s (svn action %s)." % \
(action, svn_status))
svndiff += GenerateDiffHeader(diffData)
for line in diffData.file_body.splitlines():
svndiff.append(diffData.prefix + line)
filecount += 1
if not filecount:
ErrorExit("No valid patches found in output from p4 diff")
return "\n".join(svndiff) + "\n"
def PerforceActionToSvnStatus(self, status):
# Mirroring the list at http://permalink.gmane.org/gmane.comp.version-control.mercurial.devel/28717
# Is there something more official?
return {
"add" : "A",
"branch" : "A",
"delete" : "D",
"edit" : "M", # Also includes changing file types.
"integrate" : "M",
"move/add" : "M",
"move/delete": "SKIP",
"purge" : "D", # How does a file's status become "purge"?
}[status]
def GetAction(self, filename):
changed_files = self.GetChangedFiles()
if not filename in changed_files:
ErrorExit("Trying to get base version of unknown file %s." % filename)
return changed_files[filename]
def GetBaseFile(self, filename):
base_filename = self.GetBaseFilename(filename)
base_content = ""
new_content = None
status = self.PerforceActionToSvnStatus(self.GetAction(filename))
if status != "A":
revision = self.GetBaseRevision(base_filename)
if not revision:
ErrorExit("Couldn't find base revision for file %s" % filename)
is_base_binary = self.IsBaseBinary(base_filename)
base_content = self.GetFileContent(base_filename,
revision,
is_base_binary)
is_binary = self.IsPendingBinary(filename)
if status != "D" and status != "SKIP":
relpath = self.GetLocalFilename(filename)
if is_binary:
new_content = open(relpath, "rb").read()
return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
"""Splits a patch into separate pieces for each file.
Args:
data: A string containing the output of svn diff.
Returns:
A list of 2-tuple (filename, text) where text is the svn diff output
pertaining to filename.
"""
patches = []
filename = None
diff = []
for line in data.splitlines(True):
new_filename = None
if line.startswith('Index:'):
unused, new_filename = line.split(':', 1)
new_filename = new_filename.strip()
elif line.startswith('Property changes on:'):
unused, temp_filename = line.split(':', 1)
# When a file is modified, paths use '/' between directories, however
# when a property is modified '\' is used on Windows. Make them the same
# otherwise the file shows up twice.
temp_filename = temp_filename.strip().replace('\\', '/')
if temp_filename != filename:
# File has property changes but no modifications, create a new diff.
new_filename = temp_filename
if new_filename:
if filename and diff:
patches.append((filename, ''.join(diff)))
filename = new_filename
diff = [line]
continue
if diff is not None:
diff.append(line)
if filename and diff:
patches.append((filename, ''.join(diff)))
return patches
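# Illustrative (not part of the original script): given a diff containing
# "Index: a.c" ... "Index: b.c" ..., SplitPatch returns
# [("a.c", <diff text for a.c>), ("b.c", <diff text for b.c>)].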
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
"""Uploads a separate patch for each file in the diff output.
Returns a list of [patch_key, filename] for each file.
"""
def UploadFile(filename, data):
form_fields = [("filename", filename)]
if not options.download_base:
form_fields.append(("content_upload", "1"))
files = [("data", "data.diff", data)]
ctype, body = EncodeMultipartFormData(form_fields, files)
url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
try:
response_body = rpc_server.Send(url, body, content_type=ctype)
except urllib2.HTTPError, e:
response_body = ("Failed to upload patch for %s. Got %d status code." %
(filename, e.code))
lines = response_body.splitlines()
if not lines or lines[0] != "OK":
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
return ("Uploaded patch for " + filename, [lines[1], filename])
threads = []
thread_pool = ThreadPool(options.num_upload_threads)
patches = SplitPatch(data)
rv = []
for patch in patches:
if len(patch[1]) > MAX_UPLOAD_SIZE:
print ("Not uploading the patch for " + patch[0] +
" because the file is too large.")
continue
filename = patch[0]
data = patch[1]
t = thread_pool.apply_async(UploadFile, args=(filename, data))
threads.append(t)
for t in threads:
result = t.get(timeout=60)
print result[0]
rv.append(result[1])
return rv
def GuessVCSName(options):
"""Helper to guess the version control system.
This examines the current directory, guesses which VersionControlSystem
  we're using, and returns a string indicating which VCS is detected.
Returns:
A pair (vcs, output). vcs is a string indicating which VCS was detected
and is one of VCS_GIT, VCS_MERCURIAL, VCS_SUBVERSION, VCS_PERFORCE,
VCS_CVS, or VCS_UNKNOWN.
Since local perforce repositories can't be easily detected, this method
will only guess VCS_PERFORCE if any perforce options have been specified.
output is a string containing any interesting output from the vcs
detection routine, or None if there is nothing interesting.
"""
for attribute, value in options.__dict__.iteritems():
if attribute.startswith("p4") and value != None:
return (VCS_PERFORCE, None)
def RunDetectCommand(vcs_type, command):
"""Helper to detect VCS by executing command.
Returns:
A pair (vcs, output) or None. Throws exception on error.
"""
try:
out, returncode = RunShellWithReturnCode(command)
if returncode == 0:
return (vcs_type, out.strip())
except OSError, (errcode, message):
if errcode != errno.ENOENT: # command not found code
raise
# Mercurial has a command to get the base directory of a repository
# Try running it, but don't die if we don't have hg installed.
# NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
res = RunDetectCommand(VCS_MERCURIAL, ["hg", "root"])
if res != None:
return res
# Subversion from 1.7 has a single centralized .svn folder
# ( see http://subversion.apache.org/docs/release-notes/1.7.html#wc-ng )
# That's why we use 'svn info' instead of checking for .svn dir
res = RunDetectCommand(VCS_SUBVERSION, ["svn", "info"])
if res != None:
return res
# Git has a command to test if you're in a git tree.
# Try running it, but don't die if we don't have git installed.
res = RunDetectCommand(VCS_GIT, ["git", "rev-parse",
"--is-inside-work-tree"])
if res != None:
return res
  # Detect CVS repos: `cvs status` exits with status 0 inside a working copy.
res = RunDetectCommand(VCS_CVS, ["cvs", "status"])
if res != None:
return res
return (VCS_UNKNOWN, None)
def GuessVCS(options):
"""Helper to guess the version control system.
This verifies any user-specified VersionControlSystem (by command line
or environment variable). If the user didn't specify one, this examines
the current directory, guesses which VersionControlSystem we're using,
and returns an instance of the appropriate class. Exit with an error
if we can't figure it out.
Returns:
A VersionControlSystem instance. Exits if the VCS can't be guessed.
"""
vcs = options.vcs
if not vcs:
vcs = os.environ.get("CODEREVIEW_VCS")
if vcs:
v = VCS_ABBREVIATIONS.get(vcs.lower())
if v is None:
ErrorExit("Unknown version control system %r specified." % vcs)
(vcs, extra_output) = (v, None)
elif options.svn_explicit_branches:
v = VCS_ABBREVIATIONS.get('svn')
(vcs, extra_output) = (v, None)
else:
(vcs, extra_output) = GuessVCSName(options)
if vcs == VCS_MERCURIAL:
if extra_output is None:
extra_output = RunShell(["hg", "root"]).strip()
return MercurialVCS(options, extra_output)
elif vcs == VCS_SUBVERSION:
return SubversionVCS(options)
elif vcs == VCS_PERFORCE:
return PerforceVCS(options)
elif vcs == VCS_GIT:
return GitVCS(options)
elif vcs == VCS_CVS:
return CVSVCS(options)
ErrorExit(("Could not guess version control system. "
"Are you in a working copy directory?"))
def CheckReviewer(reviewer):
"""Validate a reviewer -- either a nickname or an email addres.
Args:
reviewer: A nickname or an email address.
Calls ErrorExit() if it is an invalid email address.
"""
if "@" not in reviewer:
return # Assume nickname
parts = reviewer.split("@")
if len(parts) > 2:
ErrorExit("Invalid email address: %r" % reviewer)
assert len(parts) == 2
if "." not in parts[1]:
ErrorExit("Invalid email address: %r" % reviewer)
def LoadSubversionAutoProperties():
"""Returns the content of [auto-props] section of Subversion's config file as
a dictionary.
Returns:
A dictionary whose key-value pair corresponds the [auto-props] section's
key-value pair.
    In the following cases, an empty dictionary is returned:
- config file doesn't exist, or
- 'enable-auto-props' is not set to 'true-like-value' in [miscellany].
"""
if os.name == 'nt':
subversion_config = os.environ.get("APPDATA") + "\\Subversion\\config"
else:
subversion_config = os.path.expanduser("~/.subversion/config")
if not os.path.exists(subversion_config):
return {}
config = ConfigParser.ConfigParser()
config.read(subversion_config)
if (config.has_section("miscellany") and
config.has_option("miscellany", "enable-auto-props") and
config.getboolean("miscellany", "enable-auto-props") and
config.has_section("auto-props")):
props = {}
for file_pattern in config.options("auto-props"):
props[file_pattern] = ParseSubversionPropertyValues(
config.get("auto-props", file_pattern))
return props
else:
return {}
def ParseSubversionPropertyValues(props):
"""Parse the given property value which comes from [auto-props] section and
returns a list whose element is a (svn_prop_key, svn_prop_value) pair.
See the following doctest for example.
>>> ParseSubversionPropertyValues('svn:eol-style=LF')
[('svn:eol-style', 'LF')]
>>> ParseSubversionPropertyValues('svn:mime-type=image/jpeg')
[('svn:mime-type', 'image/jpeg')]
>>> ParseSubversionPropertyValues('svn:eol-style=LF;svn:executable')
[('svn:eol-style', 'LF'), ('svn:executable', '*')]
"""
key_value_pairs = []
for prop in props.split(";"):
key_value = prop.split("=")
assert len(key_value) <= 2
if len(key_value) == 1:
# If value is not given, use '*' as a Subversion's convention.
key_value_pairs.append((key_value[0], "*"))
else:
key_value_pairs.append((key_value[0], key_value[1]))
return key_value_pairs
def GetSubversionPropertyChanges(filename):
"""Return a Subversion's 'Property changes on ...' string, which is used in
the patch file.
Args:
filename: filename whose property might be set by [auto-props] config.
Returns:
A string like 'Property changes on |filename| ...' if given |filename|
matches any entries in [auto-props] section. None, otherwise.
"""
global svn_auto_props_map
if svn_auto_props_map is None:
svn_auto_props_map = LoadSubversionAutoProperties()
all_props = []
for file_pattern, props in svn_auto_props_map.items():
if fnmatch.fnmatch(filename, file_pattern):
all_props.extend(props)
if all_props:
return FormatSubversionPropertyChanges(filename, all_props)
return None
def FormatSubversionPropertyChanges(filename, props):
"""Returns Subversion's 'Property changes on ...' strings using given filename
and properties.
Args:
filename: filename
props: A list whose element is a (svn_prop_key, svn_prop_value) pair.
Returns:
A string which can be used in the patch file for Subversion.
See the following doctest for example.
>>> print FormatSubversionPropertyChanges('foo.cc', [('svn:eol-style', 'LF')])
Property changes on: foo.cc
___________________________________________________________________
Added: svn:eol-style
+ LF
<BLANKLINE>
"""
prop_changes_lines = [
"Property changes on: %s" % filename,
"___________________________________________________________________"]
for key, value in props:
prop_changes_lines.append("Added: " + key)
prop_changes_lines.append(" + " + value)
return "\n".join(prop_changes_lines) + "\n"
def RealMain(argv, data=None):
"""The real main function.
Args:
argv: Command line arguments.
data: Diff contents. If None (default) the diff is generated by
the VersionControlSystem implementation returned by GuessVCS().
Returns:
A 2-tuple (issue id, patchset id).
The patchset id is None if the base files are not uploaded by this
script (applies only to SVN checkouts).
"""
options, args = parser.parse_args(argv[1:])
if options.help:
if options.verbose < 2:
# hide Perforce options
parser.epilog = (
"Use '--help -v' to show additional Perforce options. "
"For more help, see "
"http://code.google.com/p/rietveld/wiki/CodeReviewHelp"
)
parser.option_groups.remove(parser.get_option_group('--p4_port'))
parser.print_help()
sys.exit(0)
global verbosity
verbosity = options.verbose
if verbosity >= 3:
LOGGER.setLevel(logging.DEBUG)
elif verbosity >= 2:
LOGGER.setLevel(logging.INFO)
vcs = GuessVCS(options)
LOGGER.info(vcs)
base = options.base_url
if isinstance(vcs, SubversionVCS):
# Guessing the base field is only supported for Subversion.
# Note: Fetching base files may become deprecated in future releases.
guessed_base = vcs.GuessBase(options.download_base)
if base:
if guessed_base and base != guessed_base:
print "Using base URL \"%s\" from --base_url instead of \"%s\"" % \
(base, guessed_base)
else:
base = guessed_base
if not base and options.download_base:
options.download_base = True
LOGGER.info("Enabled upload of base file")
if not options.assume_yes:
vcs.CheckForUnknownFiles()
if data is None:
data = vcs.GenerateDiff(args)
data = vcs.PostProcessDiff(data)
if options.print_diffs:
print "Rietveld diff start:*****"
print data
print "Rietveld diff end:*****"
files = vcs.GetBaseFiles(data)
if verbosity >= 1:
print "Upload server:", options.server, "(change with -s/--server)"
if options.use_oauth2:
options.save_cookies = False
rpc_server = GetRpcServer(options.server,
options.email,
options.host,
options.save_cookies,
options.account_type,
options.use_oauth2,
options.oauth2_port,
options.open_oauth2_local_webbrowser)
form_fields = []
LOGGER.info("about to get the repo_guid")
repo_guid = vcs.GetGUID()
if repo_guid:
form_fields.append(("repo_guid", repo_guid))
if base:
b = urlparse.urlparse(base)
username, netloc = urllib.splituser(b.netloc)
if username:
LOGGER.info("Removed username from base URL")
base = urlparse.urlunparse((b.scheme, netloc, b.path, b.params,
b.query, b.fragment))
form_fields.append(("base", base))
if options.issue:
form_fields.append(("issue", str(options.issue)))
if options.email:
form_fields.append(("user", options.email))
if options.reviewers:
for reviewer in options.reviewers.split(','):
CheckReviewer(reviewer)
form_fields.append(("reviewers", options.reviewers))
if options.cc:
for cc in options.cc.split(','):
CheckReviewer(cc)
form_fields.append(("cc", options.cc))
LOGGER.info("about to get the message")
# Process --message, --title and --file.
message = options.message or ""
title = options.title or ""
if options.file:
if options.message:
ErrorExit("Can't specify both message and message file options")
file = open(options.file, 'r')
message = file.read()
file.close()
if options.issue:
prompt = "Title describing this patch set: "
else:
prompt = "New issue subject: "
title = (
title or message.split('\n', 1)[0].strip() or raw_input(prompt).strip())
if not title and not options.issue:
ErrorExit("A non-empty title is required for a new issue")
# For existing issues, it's fine to give a patchset an empty name. Rietveld
  # doesn't accept that, so use a single space instead.
title = title or " "
if len(title) > 100:
title = title[:99] + '…'
if title and not options.issue:
message = message or title
form_fields.append(("subject", title))
# If it's a new issue send message as description. Otherwise a new
# message is created below on upload_complete.
if message and not options.issue:
form_fields.append(("description", message))
  # Send a hash of all the base files so the server can determine if a copy
# already exists in an earlier patchset.
base_hashes = ""
for file, info in files.iteritems():
if not info[0] is None:
checksum = md5(info[0]).hexdigest()
if base_hashes:
base_hashes += "|"
base_hashes += checksum + ":" + file
form_fields.append(("base_hashes", base_hashes))
if options.private:
if options.issue:
print "Warning: Private flag ignored when updating an existing issue."
else:
form_fields.append(("private", "1"))
if options.send_patch:
options.send_mail = True
if not options.download_base:
form_fields.append(("content_upload", "1"))
if len(data) > MAX_UPLOAD_SIZE:
print "Patch is large, so uploading file patches separately."
uploaded_diff_file = []
form_fields.append(("separate_patches", "1"))
else:
uploaded_diff_file = [("data", "data.diff", data)]
ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
response_body = rpc_server.Send("/upload", body, content_type=ctype)
patchset = None
if not options.download_base or not uploaded_diff_file:
lines = response_body.splitlines()
if len(lines) >= 2:
msg = lines[0]
patchset = lines[1].strip()
patches = [x.split(" ", 1) for x in lines[2:]]
else:
msg = response_body
else:
msg = response_body
StatusUpdate(msg)
if not response_body.startswith("Issue created.") and \
not response_body.startswith("Issue updated."):
sys.exit(0)
issue = msg[msg.rfind("/")+1:]
if not uploaded_diff_file:
result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
if not options.download_base:
patches = result
if not options.download_base:
vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
payload = {} # payload for final request
if options.send_mail:
payload["send_mail"] = "yes"
if options.send_patch:
payload["attach_patch"] = "yes"
if options.issue and message:
payload["message"] = message
payload = urllib.urlencode(payload)
rpc_server.Send("/" + issue + "/upload_complete/" + (patchset or ""),
payload=payload)
return issue, patchset
def main():
try:
logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
"%(lineno)s %(message)s "))
os.environ['LC_ALL'] = 'C'
RealMain(sys.argv)
except KeyboardInterrupt:
print
StatusUpdate("Interrupted.")
sys.exit(1)
if __name__ == "__main__":
main()
|
ClockworkNet/cw-code-review-upload
|
upload.py
|
Python
|
mit
| 103,273
|
[
"VisIt"
] |
1ed210cf0e3ec1ee4c02fb73d75f0f3d8f55e473b27d646e3d88de5bb64d8349
|
"""initial migration
Revision ID: 995d0aeda211
Revises:
Create Date: 2018-11-24 00:44:55.926212
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '995d0aeda211'
down_revision = None
branch_labels = None
depends_on = None
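# Typical invocation via the standard Alembic CLI (not part of this revision
# file): `alembic upgrade head` applies this migration, and
# `alembic downgrade base` drops the tables again.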
def upgrade():
op.create_table('drop_point',
sa.Column('number', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('time', sa.DateTime(), nullable=True),
sa.Column('removed', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('number')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=128), nullable=False),
sa.Column('password', sa.LargeBinary(), nullable=False),
sa.Column('token', sa.String(length=128), nullable=False),
sa.Column('can_visit', sa.Boolean(), nullable=False),
sa.Column('can_edit', sa.Boolean(), nullable=False),
sa.Column('is_admin', sa.Boolean(), nullable=False),
sa.Column('is_active', sa.Boolean(), nullable=False),
sa.Column('must_reset_pw', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('location',
sa.Column('loc_id', sa.Integer(), nullable=False),
sa.Column('dp_id', sa.Integer(), nullable=False),
sa.Column('time', sa.DateTime(), nullable=True),
sa.Column('description', sa.String(length=140), nullable=True),
sa.Column('lat', sa.Float(), nullable=True),
sa.Column('lng', sa.Float(), nullable=True),
sa.Column('level', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['dp_id'], ['drop_point.number'], ),
sa.PrimaryKeyConstraint('loc_id')
)
op.create_table('report',
sa.Column('rep_id', sa.Integer(), nullable=False),
sa.Column('dp_id', sa.Integer(), nullable=False),
sa.Column('time', sa.DateTime(), nullable=False),
sa.Column('state', sa.Enum('DEFAULT', 'NEW', 'NO_CRATES', 'SOME_BOTTLES', 'REASONABLY_FULL', 'FULL', 'OVERFLOW', 'EMPTY', name='report_states'), nullable=True),
sa.ForeignKeyConstraint(['dp_id'], ['drop_point.number'], ),
sa.PrimaryKeyConstraint('rep_id')
)
op.create_table('visit',
sa.Column('vis_id', sa.Integer(), nullable=False),
sa.Column('dp_id', sa.Integer(), nullable=False),
sa.Column('time', sa.DateTime(), nullable=False),
sa.Column('action', sa.Enum('EMPTIED', 'ADDED_CRATE', 'REMOVED_CRATE', 'RELOCATED', 'REMOVED', 'NO_ACTION', name='visit_actions'), nullable=True),
sa.ForeignKeyConstraint(['dp_id'], ['drop_point.number'], ),
sa.PrimaryKeyConstraint('vis_id')
)
def downgrade():
op.drop_table('visit')
op.drop_table('report')
op.drop_table('location')
op.drop_table('user')
op.drop_table('drop_point')
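# Usage sketch: assuming an Alembic environment is configured for this
# project, the revision above is applied and reverted from the command line:
#
#   alembic upgrade head    # runs upgrade()
#   alembic downgrade -1    # runs downgrade()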
|
der-michik/c3bottles
|
migrations/versions/995d0aeda211_.py
|
Python
|
mit
| 2,792
|
[
"VisIt"
] |
922b4751418a606a3518856a377f056a9faa8190a38b5c35377dfa1c8c3dd5db
|
#!/usr/bin/env python
# use_express.py
# October 2012 Matthew MacManes (macmanes@gmail.com)
#
# This wrapper is free software: you can redistribute it and/or modify
#
# v.0.3.1 Changes: Do not remake index if already made, added -k30 option to Bowtie2 mapping step
import sys
import subprocess
import optparse
import shutil
import os
from datetime import datetime, date, time
from Bio import SeqIO
import os.path
print ""
print ""
print ""
print "******************************************************************"
print "*** run_express.py v0.3.1 ******"
print "*** To run this program, you must have bowtie2 and eXpress******"
print "*** installed and in your $PATH ******"
print "******************************************************************"
print ""
##########################################
## date function
##########################################
def right_now():
curr_time = datetime.now()
return curr_time.strftime("%c")
##########################################
## Options
##########################################
def getOptions():
parser = optparse.OptionParser(usage="usage: python %prog -b input.fa -t index_name -p [num threads] -X [insert size] -l left.fq -r right.fq -n file.sam]",
version="%prog 0.3.1")
parser.add_option("-b", "--b2base",
dest="b2base",
default="Trinity.fasta",
metavar='file.fa',
help="fasta file for B2 index (?Trinity.fasta)")
parser.add_option("-t", "--target",
dest="target",
metavar='index',
default="index",
help="Name of bowtie2 index",)
parser.add_option("-p", "--threads",
dest="threads",
metavar='[INT]',
default="2",
help="Number of threads to use",)
parser.add_option("-X", "--insert",
dest="insert",
default="500",
metavar='[INT]',
help="Max inner distance",)
parser.add_option("-l", "--left",
dest="left",
metavar='file.fq',
default="",
help="comma sep list of left reads",)
parser.add_option("-r", "--right",
dest="right",
metavar='file.fq',
default="",
help="comma sep list of right reads",)
parser.add_option("-o", "--outdir",
dest="outdir",
metavar='path to output directory',
default=".",
help="output directory",)
parser.add_option("-n", "--name",
dest="name",
metavar='SAM filename',
default="hits.sam",
help="full path filename for SAM file from bowtie2")
parser.add_option("-U", "--unpaired",
dest="unpaired",
metavar='unpaired reads',
default="",
help="full path to unpaired reads")
(options, args) = parser.parse_args()
return options
##########################################
## alignment procedure
##########################################
#def numbering(options, awker):
# with open('%s.tmp' %(options.b2base),'w') as stdout_fh:
# num = subprocess.Popen(['awk', awker, options.b2base], stdout=stdout_fh)
# output = num.communicate()
def b2build(options):
b2b = subprocess.Popen(['bowtie2-build', '%s' % (options.b2base), options.target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = b2b.communicate()
assert b2b.returncode == 0, output[0] + "Bowtie2 build failed\n"
def bowtie2_paired(options):
b2 = subprocess.Popen(['bowtie2', '-k30', '-t', '-p', options.threads, '-X', options.insert, '-x', options.target, '-1', options.left, '-2', options.right, '-U', options.unpaired, '-S', options.name], stdout=subprocess.PIPE)
output = b2.communicate()
assert b2.returncode == 0, output[0] + "Bowtie2 alignment failed\n"
def express(options):
exp = subprocess.Popen(['express', '-o', options.outdir, '-p', options.threads, '%s' % (options.b2base), options.name])
output = exp.communicate()
assert exp.returncode == 0, output[0] + "express failed\n"
##########################################
## alignment depend
##########################################
def checkbowtiebuild():
    # Check for the bowtie2 indexer (the build step above uses bowtie2-build).
    try:
        p = subprocess.Popen(['bowtie2-build'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError:
        print "Could not find bowtie2-build"
        print "Make sure that it is properly installed on your path"
        sys.exit(1)
def checkbowtie2():
try:
p = subprocess.Popen(['bowtie2'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError:
print "Could not find Bowtie2"
print "Make sure that it is properly installed on your path"
sys.exit(1)
def checkexpress():
try:
p = subprocess.Popen(['express'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError:
print "Could not find eXpress"
print "Make sure that it is properly installed on your path"
sys.exit(1)
##########################################
## Master function
##########################################
def main():
options = getOptions()
checkbowtiebuild()
checkbowtie2()
checkexpress()
#numbering(options, awker)
print >> sys.stderr,"\nBuilding Bowtie2 index, If Necessary: [%s] \n" % (right_now())
#b2build(options)
#print options.target+'.1.bt2'
if os.path.exists(options.target+'.1.bt2'):
print >> sys.stderr,"\nLucky You, the Bowtie2 Index Already Exists! I'm going straight to the mapping step. \n"
else:
b2build(options)
print >> sys.stderr,"\nAligning with Bowtie2: [%s] \n" % (right_now())
bowtie2_paired(options)
print >> sys.stderr,"\nCalculating Expression with eXpress: [%s] \n" % (right_now())
express(options)
print >> sys.stderr,"\nDone.. Have a good day! [%s] \n" % (right_now())
if __name__ == "__main__":
main()
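# Usage sketch (file names below are placeholders), following the option list
# defined in getOptions():
#
#   python use_express.py -b Trinity.fasta -t index -p 8 -X 500 \
#       -l left_1.fq -r right_1.fq -o express_out -n hits.sam
#
# This builds the Bowtie2 index if it does not already exist, maps the paired
# reads with bowtie2 -k30, and runs eXpress on the resulting SAM file.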
|
macmanes/trinityrnaseq
|
util/eXpress_util/use_express.py
|
Python
|
bsd-3-clause
| 6,192
|
[
"Bowtie"
] |
2de6bccbba1f2d1a9bfb20a17ecef2e383a5db0d050e6bf1e780240d5c573f15
|
# -*- coding: utf-8 -*-
"""
Tests for bandwidth selection and calculation.
Author: Padarn Wilson
"""
import numpy as np
from scipy import stats
from statsmodels.sandbox.nonparametric import kernels
from statsmodels.distributions.mixture_rvs import mixture_rvs
from statsmodels.nonparametric.kde import KDEUnivariate as KDE
from statsmodels.nonparametric.bandwidths import select_bandwidth
from numpy.testing import assert_allclose
# setup test data
np.random.seed(12345)
Xi = mixture_rvs([.25,.75], size=200, dist=[stats.norm, stats.norm],
kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
class TestBandwidthCalculation(object):
def test_calculate_bandwidth_gaussian(self):
bw_expected = [0.29774853596742024,
0.25304408155871411,
0.29781147113698891]
kern = kernels.Gaussian()
bw_calc = [0, 0, 0]
for ii, bw in enumerate(['scott','silverman','normal_reference']):
bw_calc[ii] = select_bandwidth(Xi, bw, kern)
assert_allclose(bw_expected, bw_calc)
class CheckNormalReferenceConstant(object):
def test_calculate_normal_reference_constant(self):
const = self.constant
kern = self.kern
assert_allclose(const, kern.normal_reference_constant, 1e-2)
class TestEpanechnikov(CheckNormalReferenceConstant):
kern = kernels.Epanechnikov()
constant = 2.34
class TestGaussian(CheckNormalReferenceConstant):
kern = kernels.Gaussian()
constant = 1.06
class TestBiweight(CheckNormalReferenceConstant):
kern = kernels.Biweight()
constant = 2.78
class TestTriweight(CheckNormalReferenceConstant):
kern = kernels.Triweight()
constant = 3.15
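# Rough sketch of the rule of thumb behind the expected values above: Scott's
# rule is commonly stated as bw ~ 1.059 * sigma * n**(-1/5). The exact
# statsmodels implementation may differ (e.g. a robust sigma based on the IQR),
# so this is only an illustrative cross-check, not a reimplementation.
def _scott_bandwidth_sketch(x):
    sigma = np.std(x, ddof=1)
    return 1.059 * sigma * len(x) ** (-0.2)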
if __name__ == "__main__":
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb'], exit=False)
|
huongttlan/statsmodels
|
statsmodels/nonparametric/tests/test_bandwidths.py
|
Python
|
bsd-3-clause
| 1,860
|
[
"Gaussian"
] |
b45fbe539ce2b03fbb0dfc7ddfa6205256026b3707ce14cc586a86a558b66f3d
|
#!/usr/bin/python
import numpy as np
import healpy as hp
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
infile = sys.argv[1]
outfile = infile[:-5] + ".png"
print outfile
DPI=300
m = hp.read_map(infile)
hp.mollview(m, title="Mollview image RING")
if sys.argv[2]=='1':
#galactic center
hp.projscatter((17.0+45.0/60+40/3600)/24*360, -29.0-28.0/3600, lonlat=True, marker='x', label="Galactic Center")
#Virgo cluster
hp.projscatter((12.0+27.0/60)/24*360, 12.0+43.0/60, lonlat=True, marker='x', label="Virgo Cluster")
#LMC
hp.projscatter((5.0+23.0/60+34.5/3600)/24*360, -69.0-45.0/60-22.0/3600, lonlat=True, marker='x', label="Large Magellanic Cloud")
#SMC
hp.projscatter((0.0+52.0/60+44.8/3600)/24*360, -72.0-49.0/60-43.0/3600, lonlat=True, marker='x', label="Small Magellanic Cloud")
#Andromeda
hp.projscatter((0.0+42.0/60+44.3/3600)/24*360, 41.0+16.0/60+9.0/3600, lonlat=True, marker='x', label="Andromeda Galaxy")
#Coma cluster
hp.projscatter((12.0+59.0/60+48.7/3600)/24*360, 27.0+58.0/60+50.0/3600, lonlat=True, marker='x', label="Coma Cluster")
#Leo cluster
hp.projscatter((11.0+44.0/60+36.5/3600)/24*360, 19.0+45.0/60+32.0/3600, lonlat=True, marker='x', label="Leo Cluster")
#Norma cluster
hp.projscatter((16.0+15.0/60+32.8/3600)/24*360, -60.0-54.0/60-30.0/3600, lonlat=True, marker='x', label="Norma Cluster")
#centaurus cluster
hp.projscatter((12.0+48.0/60+51.8/3600)/24*360, -41.0-18.0/60-21.0/3600, lonlat=True, marker='x', label="Centaurus Cluster")
#Perseus cluster
hp.projscatter((3.0+18.0/60)/24*360, 41.0+30.0/60, lonlat=True, marker='x', label="Perseus Cluster")
#Taurus void
hp.projscatter((3.0+30.0/60)/24*360, 20.0, lonlat=True, marker='x', label="Taurus Void")
plt.legend()
hp.graticule()
plt.savefig(outfile, dpi=DPI)
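# The projscatter calls above all convert right ascension given in hours,
# minutes and seconds to degrees via (h + m/60 + s/3600) / 24 * 360; a small
# helper makes that arithmetic explicit (illustrative only, not used elsewhere
# in this script).
def hms_to_deg(h, m=0.0, s=0.0):
    """Convert right ascension in hours/minutes/seconds to degrees."""
    return (h + m / 60.0 + s / 3600.0) / 24.0 * 360.0
# Example: the Galactic Center at 17h45m40s maps to roughly 266.4 degrees.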
|
bencebecsy/galaxy-priors
|
plot_skymap.py
|
Python
|
mit
| 1,865
|
[
"Galaxy"
] |
d4955cd0cf8e289fae3674a31a55e25a26a80bcf42737d050510635547107a40
|
#!/usr/bin/env python
# coding=utf-8
## @package biopredyn
## Copyright: [2012-2019] Cosmo Tech, All Rights Reserved
## License: BSD 3-Clause
import copy
import libsbml
import libsedml
import libsbmlsim
import algorithm, result, statistics
import numpy as np
from cobra.io.sbml import create_cobra_model_from_sbml_doc
from COPASI import *
import libfbc
## Base representation of the execution of an algorithm, independent from the
## model or data set it has to be run with.
class Simulation:
## @var algorithm
# KiSAO identifier of the algorithm to execute.
## @var id
# A unique identifier for this object.
## @var name
# Name of this object.
## @var type
# Type of simulation.
## Constructor; either 'simulation' or 'idf' and 's_type' must be passed as
## keyword arguments.
# @param self The object pointer.
# @param simulation A libsedml.SedSimulation object; optional (default: None).
# @param idf A unique identifier; optional (default: None).
# @param name A name for 'self'; optional (default: None).
# @param s_type The type of simulation encoded in 'self'. Possible values for
# s_type are: 'uniformTimeCourse', 'oneStep', 'steadyState' and 'simulation'.
# Optional (default: None).
def __init__(self, simulation=None, idf=None, name=None, s_type=None):
if (simulation is None) and (idf is None or s_type is None):
raise RuntimeError("Either 'simulation' or 'idf' and 's_type' must be " +
"passed as keyword arguments.")
else:
if simulation is not None:
self.set_algorithm(algorithm.Algorithm(simulation.getAlgorithm()))
self.id = simulation.getId()
self.name = simulation.getName()
self.type = simulation.getElementName()
elif idf is not None and s_type is not None:
self.id = idf
self.name = name
self.type = s_type
## String representation of this. Displays it as a hierarchy.
# @param self The object pointer.
# @return A string representing this as a hierarchy.
def __str__(self):
tree = " |-" + self.type + " id=" + self.id + " name=" + self.name + "\n"
tree += " |-algorithm " + self.algorithm.get_kisao_id() + "\n"
return tree
## Getter. Returns self.algorithm.
# @param self The object pointer.
# @return self.algorithm
def get_algorithm(self):
return self.algorithm
## Getter. Returns self.id.
# @param self The object pointer.
# @return self.id
def get_id(self):
return self.id
## Setter for self.algorithm.
# @param self The object pointer.
# @param algo A biopredyn.algorithm.Algorithm object.
def set_algorithm(self, algo):
self.algorithm = algo
## Setter for self.id.
# @param self The object pointer.
# @param id New value for self.id.
def set_id(self, id):
self.id = id
## Getter. Returns self.name.
# @param self The object pointer.
def get_name(self):
return self.name
## Setter for self.name.
# @param self The object pointer.
# @param name New value for self.name.
def set_name(self, name):
self.name = name
## Getter. Returns self.type.
# @param self The object pointer.
# @return self.type
def get_type(self):
return self.type
## Simulation-derived class for one step simulations.
class OneStep(Simulation):
## @var step
# Value of the time step to be considered.
## Overridden constructor; either 'simulation' or 'idf' and 'step'
## must be passed as keyword arguments.
# @param self The object pointer.
# @param simulation A libsedml.SedOneStep element; optional (default: None).
# @param idf A unique identifier; optional (default: None).
# @param name A name for 'self'; optional (default: None).
# @param step Size of the time step to integrate; optional (default: None).
def __init__(self, simulation=None, idf=None, name=None, step=None):
if simulation is None and (idf is None or step is None):
raise RuntimeError("Either 'simulation' or 'idf' and 'step' must be " +
"passed as keyword arguments.")
else:
if simulation is not None:
Simulation.__init__(self, simulation=simulation)
self.step = simulation.getStep()
else:
Simulation.__init__(self, idf=idf, name=name, s_type='oneStep')
self.step = step
## Getter. Returns self.step.
# @param self The object pointer.
# @return self.step
def get_step(self):
return self.step
## Run the simulation encoded in self on the input model using the input tool.
# @param self The object pointer.
# @param model A biopredyn.model.Model object.
# @param tool Name of the tool to use as simulation engine (string).
# @param res A biopredyn.result.TimeSeries object.
# @return A biopredyn.result.TimeSeries object.
def run(self, model, tool, res):
# tool selection - by default copasi is chosen
if tool is None or tool == 'copasi':
self.run_as_copasi_one_step(model, res)
else:
raise NameError("Invalid tool name; only 'copasi' is available as a " +
"simulation engine.")
return res
## Run the simulation encoded in self as a Copasi model.
# @param self The object pointer.
# @param model A biopredyn.model.Model object.
# @param res A biopredyn.result.TimeSeries object.
# @return A biopredyn.result.TimeSeries object.
def run_as_copasi_one_step(self, model, res):
data_model = CCopasiDataModel()
data_model.importSBMLFromString(model.get_sbml_doc().toSBML())
task = data_model.addTask(CTrajectoryTask.timeCourse)
task.setMethodType(CCopasiMethod.deterministic)
task.processStep(self.get_step())
res.import_from_copasi_time_series(task.getTimeSeries(),
model.get_species_copasi_ids())
return res
## Setter for self.step.
# @param self The object pointer.
# @param step New value for self.step.
def set_step(self, step):
self.step = step
## Returns the libsedml.SedOneStep representation of this.
# @param self The object pointer.
# @param level Level of SED-ML language to be used.
# @param version Version of SED-ML language to be used.
# @return A libsedml.SedOneStep object.
def to_sedml(self, level, version):
one = libsedml.SedOneStep(level, version)
one.setId(self.get_id())
if self.get_name() is not None:
one.setName(str(self.get_name()))
one.setStep(self.get_step())
one.setAlgorithm(self.get_algorithm().to_sedml(level, version))
return one
## Simulation-derived class for steady state simulations.
class SteadyState(Simulation):
## Overridden constructor; either 'simulation' or 'idf'
## must be passed as keyword arguments.
# @param self The object pointer.
  # @param simulation A libsedml.SedSteadyState element; optional (default: None).
# @param idf A unique identifier; optional (default: None).
# @param name A name for 'self'; optional (default: None).
def __init__(self, simulation=None, idf=None, name=None):
if simulation is None and idf is None:
raise RuntimeError("Either 'simulation' or 'idf' must be " +
"passed as keyword arguments.")
else:
if simulation is not None:
Simulation.__init__(self, simulation=simulation)
else:
Simulation.__init__(self, idf=idf, name=name, s_type='steadyState')
## Run the simulation encoded in self on the input model using the input tool.
# @param self The object pointer.
# @param model A biopredyn.model.Model object.
# @param tool Name of the tool to use as simulation engine (string).
# @param res A biopredyn.result.Fluxes object.
# @return A biopredyn.result.Fluxes object.
def run(self, model, tool, res):
# tool selection - by default cobrapy is chosen
if tool is None or tool == 'cobrapy':
self.run_as_cobrapy_problem(model, res)
elif tool == 'libfbc':
self.run_as_libfbc_problem(model, res)
else:
raise NameError("Invalid tool name; available names are 'cobrapy' and " +
" 'libfbc'.")
return res
## Run the simulation encoded in self as a CobraPy model.
# @param self The object pointer.
# @param model A biopredyn.model.Model object.
# @param res A biopredyn.result.Fluxes object.
# @return A biopredyn.result.Fluxes object.
def run_as_cobrapy_problem(self, model, res):
if res is None:
res = result.Fluxes()
# Case where the encoded simulation is a FBA
if self.algorithm.get_kisao_id() == "KISAO:0000437":
# Run a basic FBA with cobrapy
cobra_model = create_cobra_model_from_sbml_doc(model.get_sbml_doc())
# Optional model parameters are set
obj = self.algorithm.get_parameter_by_name('objective_function')
sense = self.algorithm.get_parameter_by_name('objective_sense')
if obj is not None:
cobra_model.change_objective([obj.get_value()])
if sense is not None:
cobra_model.optimize(objective_sense=sense.get_value())
else:
cobra_model.optimize()
else:
raise NameError("Invalid KiSAO identifier for a steady state " +
"simulation; see http://bioportal.bioontology.org/ontologies/KISAO " +
"for more information about the KiSAO ontology.")
res.import_from_cobrapy_fba(cobra_model.solution)
return res
## Run the simulation encoded in self as a libFBC problem.
# @param self The object pointer.
# @param model A biopredyn.model.Model object.
# @param res A biopredyn.result.Fluxes object.
# @return A biopredyn.result.Fluxes object
def run_as_libfbc_problem(self, model, res):
if res is None:
res = result.Fluxes()
# Case where the encoded simulation is a FBA
if self.algorithm.get_kisao_id() == "KISAO:0000437":
fbc_model = libfbc.FBAProblem()
fbc_model.initFromSBMLString(model.get_sbml_doc().toSBML())
fbc_model.solveProblem()
else:
raise NameError("Invalid KiSAO identifier for a steady state " +
"simulation; see http://bioportal.bioontology.org/ontologies/KISAO " +
"for more information about the KiSAO ontology.")
res.import_from_libfbc_fba(fbc_model.getSolution())
return res
## Returns the libsedml.SedSteadyState representation of this.
# @param self The object pointer.
# @param level Level of SED-ML language to be used.
# @param version Version of SED-ML language to be used.
# @return A libsedml.SedSteadyState object.
def to_sedml(self, level, version):
st = libsedml.SedSteadyState(level, version)
st.setId(self.get_id())
if self.get_name() is not None:
st.setName(str(self.get_name()))
st.setAlgorithm(self.get_algorithm().to_sedml(level, version))
return st
## Simulation-derived class for uniform time course simulations.
class UniformTimeCourse(Simulation):
## @var initial_time
# Time point where the simulation begins.
## @var number_of_points
# Number of time points to consider between output_start_time and
# output_end_time.
## @var output_end_time
# Time point where both the simulation and the result collection end.
## @var output_start_time
# Time point where the result collection starts; not necessarily the same as
# initial_time.
## Overridden constructor; either 'simulation' or 'idf', 'start', 'end',
## 'out_st' and 'pts' must be passed as keyword arguments.
# @param self The object pointer.
# @param simulation A libsedml.SedUniformTimeCourse element; optional
# (default: None).
# @param idf A unique identifier; optional (default: None).
# @param name A name for 'self'; optional (default: None).
# @param start Time point where the simulation begins; optional (default:
# None).
# @param end Time point where both the simulation and the result collection
# end; optional (default: None).
# @param out_st Time point where the result collection starts; optional
# (default: None).
# @param pts Number of time points between 'out_st' and 'end'; optional
# (default: None).
def __init__(self, simulation=None, idf=None, name=None, start=None, end=None,
out_st=None, pts=None):
if simulation is None and (idf is None or start is None or end is None or
out_st is None or pts is None):
raise RuntimeError("Either 'simulation' or 'idf', 'start', 'end', " +
"'out_st' and 'pts' must be passed as keyword arguments.")
else:
if simulation is not None:
Simulation.__init__(self, simulation=simulation)
self.initial_time = simulation.getInitialTime()
self.number_of_points = simulation.getNumberOfPoints()
self.output_end_time = simulation.getOutputEndTime()
self.output_start_time = simulation.getOutputStartTime()
else:
Simulation.__init__(self, idf=idf, name=name,
s_type='uniformTimeCourse')
self.initial_time = start
self.number_of_points = pts
self.output_end_time = end
self.output_start_time = out_st
## Overridden string representation of this. Displays it as a hierarchy.
# @param self The object pointer.
# @return A string representing this as a hierarchy.
def __str__(self):
tree = " |-" + self.type + " id=" + self.id + " name=" + self.name
tree += " initialTime" + str(self.initial_time)
tree += " numberOfPoints" + str(self.number_of_points)
tree += " outputEndTime" + str(self.output_end_time)
tree += " outputStartTime" + str(self.output_start_time) + "\n"
tree += " |-algorithm " + self.algorithm.get_kisao_id() + "\n"
return tree
## Getter. Returns self.initial_time.
# @param self The object pointer.
# @return self.initial_time
def get_initial_time(self):
return self.initial_time
## Getter. Returns self.number_of_points.
# @param self The object pointer.
# @return self.number_of_points
def get_number_of_points(self):
return self.number_of_points
## Getter. Returns self.output_end_time.
# @param self The object pointer.
# @return self.output_end_time
def get_output_end_time(self):
return self.output_end_time
## Getter. Returns self.output_start_time.
# @param self The object pointer.
# @return self.output_start_time
def get_output_start_time(self):
return self.output_start_time
## Run the simulation encoded in self on the input model using the input tool,
## and returns its output as a biopredyn.result.TimeSeries object.
# @param self The object pointer.
# @param model A biopredyn.model.Model object.
# @param tool Name of the tool to use as simulation engine (string).
# @param res A biopredyn.result.TimeSeries object.
# @return A biopredyn.result.TimeSeries object.
def run(self, model, tool, res):
# tool selection - by default libsbmlsim is chosen
if tool is None or tool == 'libsbmlsim':
self.run_as_libsbmlsim_time_course(model, res)
elif tool == 'copasi':
self.run_as_copasi_time_course(model, res)
else:
raise NameError("Invalid tool name; available names are 'copasi' and 'libsbmlsim'.")
return res
## Run this as a COPASI time course and import its result.
# @param self The object pointer.
# @param model A biopredyn.model.Model object.
# @param res A biopredyn.result.TimeSeries object where simulation results
# will be written.
# @param unknowns A list of N identifiers corresponding to the IDs of unknown
# parameters in model. If not None, the simulation will be run with the
# values listed in fitted_values for the unknown parameters. Default: None.
# @param fitted_values A list of N values corresponding to the N unknowns.
# @return A biopredyn.result.TimeSeries object.
def run_as_copasi_time_course(
self, model, res, unknowns=None, fitted_values=None):
if res is None:
res = result.TimeSeries()
steps = self.get_number_of_points()
start = self.get_initial_time()
o_start = self.get_output_start_time()
end = self.get_output_end_time()
step = (end - o_start) / steps
duration = end - start
mod = model.get_sbml_doc()
# Importing model to COPASI
data_model = CCopasiDataModel()
data_model.importSBMLFromString(mod.toSBML())
cop_model = data_model.getModel()
# unknown parameter assignment
if unknowns is not None:
for u in range(len(unknowns)):
unknown = unknowns[u]
for r in range(cop_model.getReactions().size()):
reaction = cop_model.getReaction(r)
for p in range(reaction.getParameters().size()):
param = reaction.getParameters().getParameter(p)
if param.getObjectName() == unknown:
if reaction.isLocalParameter(p): # local case
reaction.setParameterValue(unknown, fitted_values[u])
else: # global case
cop_model.getModelValues().getByName(unknown).setInitialValue(
fitted_values[u])
task = data_model.addTask(CTrajectoryTask.timeCourse)
pbm = task.getProblem()
# Set the parameters
pbm.setOutputStartTime(o_start)
pbm.setStepSize(step)
pbm.setDuration(duration)
pbm.setTimeSeriesRequested(True)
# TODO: acquire KiSAO description of the algorithm
task.setMethodType(CCopasiMethod.deterministic)
# Execution - initial values are used
task.processWithOutputFlags(True, CCopasiTask.ONLY_TIME_SERIES)
# Time series extraction
res.import_from_copasi_time_series(task.getTimeSeries(),
model.get_species_copasi_ids())
return res
## Run this as a libSBMLSim time course and import its result.
# @param self The object pointer.
# @param model A biopredyn.model.Model object.
# @param res A biopredyn.result.TimeSeries object where simulation results
# will be written.
# @return A biopredyn.result.TimeSeries object.
# TODO: add option for setting parameter values before running
def run_as_libsbmlsim_time_course(self, model, res):
if res is None:
res = result.TimeSeries()
steps = self.get_number_of_points()
start = self.get_output_start_time()
end = self.get_output_end_time()
step = (end - start) / steps
mod = model.get_sbml_doc()
# TODO: acquire KiSAO description of the algorithm
r = libsbmlsim.simulateSBMLFromString(
mod.toSBML(),
end,
step,
1,
0,
libsbmlsim.MTHD_RUNGE_KUTTA,
0)
res.import_from_libsbmlsim(r, start)
return res
## Use the parameter of the simulation to estimate the input model parameters
## with respect to the input data file. Uses COPASI as simulation engine.
# @param self The object pointer.
# @param mod A biopredyn.model.Model object.
# @param cal_data Path to a column-aligned CSV file containing the
# calibration data.
# @param val_data Path to a column-aligned CSV file containing the
# validation data.
  # @param observables A list of identifiers corresponding to the IDs of the
  # observables to consider (both in model and data file).
  # @param unknowns A list of identifiers corresponding to the IDs of the
  # parameters to be estimated in the input model.
# @param min_unknown_values A list of numerical values; lower bound of the
# parameter value ranges.
# @param max_unknown_values A list of numerical values; upper bound of the
# parameter value ranges.
# @param algorithm A CCopasiMethod::SubType object describing the algorithm
# to be used.
# @param rm A biopredyn.resources.ResourceManager object.
  # @return A biopredyn.statistics.Statistics object.
def run_as_parameter_estimation(self, mod, cal_data, val_data, observables,
unknowns, min_unknown_values, max_unknown_values, algorithm, rm):
data_model = CCopasiDataModel()
data_model.importSBMLFromString(mod.get_sbml_doc().toSBML())
# importing data
data = result.TimeSeries()
metabolites = data.import_from_csv_file(cal_data, rm)
steps = len(data.get_time_steps())
# task definition
fit_task = data_model.addTask(CFitTask.parameterFitting)
fit_problem = fit_task.getProblem()
# experiment definition
experiment_set = fit_problem.getParameter("Experiment Set")
experiment = CExperiment(data_model)
experiment.setFileName(cal_data)
experiment.setSeparator(",")
experiment.setFirstRow(1) # offset due to header
experiment.setLastRow(steps + 1)
experiment.setHeaderRow(1)
experiment.setExperimentType(CCopasiTask.timeCourse)
experiment.setNumColumns(len(metabolites))
object_map = experiment.getObjectMap()
object_map.setNumCols(len(metabolites))
model = data_model.getModel()
# assigning roles and names with respect to the content of the data file
index = 0
for name in metabolites:
if str.lower(name).__contains__("time"):
# case where the current 'metabolite' is time
object_map.setRole(index, CExperiment.time)
time_reference = model.getObject(CCopasiObjectName("Reference=Time"))
object_map.setObjectCN(index, time_reference.getCN().getString())
elif name in observables:
# case where the current metabolite is an observable
for m in range(model.getMetabolites().size()):
meta = model.getMetabolites().get(m)
if (meta.getSBMLId() == name):
metab_object = meta.getObject(
CCopasiObjectName("Reference=Concentration"))
object_map.setRole(index, CExperiment.dependent)
object_map.setObjectCN(index, metab_object.getCN().getString())
index += 1
experiment_set.addExperiment(experiment)
experiment = experiment_set.getExperiment(0)
# definition of the fitted object - i.e. the parameters listed in unknowns
opt_item_group = fit_problem.getParameter("OptimizationItemList")
for u in range(len(unknowns)):
unknown = unknowns[u]
for r in range(model.getReactions().size()):
reaction = model.getReaction(r)
for p in range(reaction.getParameters().size()):
param = reaction.getParameters().getParameter(p)
if param.getObjectName() == unknown:
if reaction.isLocalParameter(p): # case of a local parameter
fit_item = CFitItem(data_model)
fit_item.setObjectCN(
param.getObject(CCopasiObjectName("Reference=Value")).getCN())
fit_item.setStartValue(param.getValue())
fit_item.setLowerBound(
CCopasiObjectName(str(min_unknown_values[u])))
fit_item.setUpperBound(
CCopasiObjectName(str(max_unknown_values[u])))
opt_item_group.addParameter(fit_item)
else: # case of a global parameter
parameter = model.getModelValues().getByName(unknown)
exists = False
for fit in range(opt_item_group.size()):
if opt_item_group.getParameter(fit).getCN() == parameter.getCN():
exists = True # parameter already exists as a CFitItem
break
if not exists:
fit_item = CFitItem(data_model)
fit_item.setObjectCN(parameter.getObject(CCopasiObjectName(
"Reference=InitialValue")).getCN())
fit_item.setStartValue(param.getValue())
fit_item.setLowerBound(
CCopasiObjectName(str(min_unknown_values[u])))
fit_item.setUpperBound(
CCopasiObjectName(str(max_unknown_values[u])))
opt_item_group.addParameter(fit_item)
fit_task.setMethodType(algorithm)
fit_task.processWithOutputFlags(True, CCopasiTask.ONLY_TIME_SERIES)
# extracting values of the fitted parameters
fitted_param = []
for p in range(opt_item_group.size()):
opt_item = opt_item_group.getParameter(p)
fitted_param.append(opt_item.getLocalValue())
# extracting Fisher Information Matrix from fit_problem
fisher = fit_problem.getFisher()
f_mat = []
for row in range(fisher.numRows()):
r = []
for col in range(fisher.numCols()):
r.append(fisher.get(row, col))
f_mat.append(r)
f_mat = np.mat(f_mat)
stats = statistics.Statistics(
val_data, data, copy.deepcopy(self), mod, fit_problem.getSolutionValue(),
observables, unknowns, fitted_param, f_mat, rm)
return stats
## Setter. Assign a new value to self.initial_time.
# @param self The object pointer.
# @param initial_time New value for self.initial_time.
def set_initial_time(self, initial_time):
self.initial_time = initial_time
## Setter. Assign a new value to self.number_of_points.
# @param self The object pointer.
# @param number_of_points New value of self.number_of_points.
def set_number_of_points(self, number_of_points):
self.number_of_points = number_of_points
## Setter. Assign a new value to self.output_end_time.
# @param self The object pointer.
# @param output_end_time New value of self.output_end_time.
def set_output_end_time(self, output_end_time):
self.output_end_time = output_end_time
## Setter. Assign a new value to self.output_start_time.
# @param self The object pointer.
# @param output_start_time New value for self.output_start_time.
def set_output_start_time(self, output_start_time):
self.output_start_time = output_start_time
## Returns the libsedml.SedUniformTimeCourse representation of this.
# @param self The object pointer.
# @param level Level of SED-ML language to be used.
# @param version Version of SED-ML language to be used.
# @return A libsedml.SedUniformTimeCourse object.
def to_sedml(self, level, version):
sim = libsedml.SedUniformTimeCourse(level, version)
sim.setId(self.get_id())
if self.get_name() is not None:
sim.setName(str(self.get_name()))
sim.setInitialTime(self.get_initial_time())
sim.setOutputStartTime(self.get_output_start_time())
sim.setOutputEndTime(self.get_output_end_time())
sim.setNumberOfPoints(self.get_number_of_points())
sim.setAlgorithm(self.get_algorithm().to_sedml(level, version))
return sim
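## Illustrative sketch (the identifiers below are made up): the classes above
## can be built either from a libsedml element or from plain keyword arguments.
def _example_simulations():
  one_step = OneStep(idf="sim_one_step", name="one step", step=0.1)
  time_course = UniformTimeCourse(idf="sim_utc", name="time course",
    start=0.0, end=10.0, out_st=0.0, pts=100)
  return one_step, time_course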
|
TheCoSMoCompany/biopredyn
|
Prototype/python/biopredyn/simulation.py
|
Python
|
bsd-3-clause
| 26,014
|
[
"COPASI"
] |
f5e15895ee7167c604661bc5a9381f48c8a2fa0a109d09ef2a0a343da3af4cfb
|
# MIT License
#
# Copyright (c) 2016 Anders Steen Christensen, Felix A. Faber, Lars A. Bratholm
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import numpy as np
from .fkernels import fgaussian_kernel
from .fkernels import flaplacian_kernel
from .fkernels import flinear_kernel
from .fkernels import fsargan_kernel
from .fkernels import fmatern_kernel_l2
from .fkernels import fget_local_kernels_gaussian
from .fkernels import fget_local_kernels_laplacian
def laplacian_kernel(A, B, sigma):
""" Calculates the Laplacian kernel matrix K, where :math:`K_{ij}`:
:math:`K_{ij} = \\exp \\big( -\\frac{\\|A_i - B_j\\|_1}{\sigma} \\big)`
Where :math:`A_{i}` and :math:`B_{j}` are representation vectors.
K is calculated using an OpenMP parallel Fortran routine.
:param A: 2D array of representations - shape (N, representation size).
:type A: numpy array
:param B: 2D array of representations - shape (M, representation size).
:type B: numpy array
:param sigma: The value of sigma in the kernel matrix.
:type sigma: float
:return: The Laplacian kernel matrix - shape (N, M)
:rtype: numpy array
"""
na = A.shape[0]
nb = B.shape[0]
K = np.empty((na, nb), order='F')
# Note: Transposed for Fortran
flaplacian_kernel(A.T, na, B.T, nb, K, sigma)
return K
def gaussian_kernel(A, B, sigma):
""" Calculates the Gaussian kernel matrix K, where :math:`K_{ij}`:
:math:`K_{ij} = \\exp \\big( -\\frac{\\|A_i - B_j\\|_2^2}{2\sigma^2} \\big)`
Where :math:`A_{i}` and :math:`B_{j}` are representation vectors.
K is calculated using an OpenMP parallel Fortran routine.
:param A: 2D array of representations - shape (N, representation size).
:type A: numpy array
:param B: 2D array of representations - shape (M, representation size).
:type B: numpy array
:param sigma: The value of sigma in the kernel matrix.
:type sigma: float
:return: The Gaussian kernel matrix - shape (N, M)
:rtype: numpy array
"""
na = A.shape[0]
nb = B.shape[0]
K = np.empty((na, nb), order='F')
# Note: Transposed for Fortran
fgaussian_kernel(A.T, na, B.T, nb, K, sigma)
return K
def linear_kernel(A, B):
""" Calculates the linear kernel matrix K, where :math:`K_{ij}`:
:math:`K_{ij} = A_i \cdot B_j`
    Where :math:`A_{i}` and :math:`B_{j}` are representation vectors.
K is calculated using an OpenMP parallel Fortran routine.
:param A: 2D array of representations - shape (N, representation size).
:type A: numpy array
:param B: 2D array of representations - shape (M, representation size).
:type B: numpy array
    :return: The linear kernel matrix - shape (N, M)
:rtype: numpy array
"""
na = A.shape[0]
nb = B.shape[0]
K = np.empty((na, nb), order='F')
# Note: Transposed for Fortran
flinear_kernel(A.T, na, B.T, nb, K)
return K
def sargan_kernel(A, B, sigma, gammas):
""" Calculates the Sargan kernel matrix K, where :math:`K_{ij}`:
:math:`K_{ij} = \\exp \\big( -\\frac{\\| A_i - B_j \\|_1)}{\sigma} \\big) \\big(1 + \\sum_{k} \\frac{\gamma_{k} \\| A_i - B_j \\|_1^k}{\sigma^k} \\big)`
Where :math:`A_{i}` and :math:`B_{j}` are representation vectors.
K is calculated using an OpenMP parallel Fortran routine.
:param A: 2D array of representations - shape (N, representation size).
:type A: numpy array
:param B: 2D array of representations - shape (M, representation size).
:type B: numpy array
:param sigma: The value of sigma in the kernel matrix.
:type sigma: float
:param gammas: 1D array of parameters in the kernel matrix.
:type gammas: numpy array
:return: The Sargan kernel matrix - shape (N, M).
:rtype: numpy array
"""
ng = len(gammas)
if ng == 0:
return laplacian_kernel(A, B, sigma)
na = A.shape[0]
nb = B.shape[0]
K = np.empty((na, nb), order='F')
# Note: Transposed for Fortran
fsargan_kernel(A.T, na, B.T, nb, K, sigma, gammas, ng)
return K
def matern_kernel(A, B, sigma, order = 0, metric = "l1"):
""" Calculates the Matern kernel matrix K, where :math:`K_{ij}`:
for order = 0:
:math:`K_{ij} = \\exp\\big( -\\frac{d}{\sigma} \\big)`
for order = 1:
:math:`K_{ij} = \\exp\\big( -\\frac{\\sqrt{3} d}{\sigma} \\big) \\big(1 + \\frac{\\sqrt{3} d}{\sigma} \\big)`
for order = 2:
    :math:`K_{ij} = \\exp\\big( -\\frac{\\sqrt{5} d}{\sigma} \\big) \\big( 1 + \\frac{\\sqrt{5} d}{\sigma} + \\frac{5 d^2}{3\sigma^2} \\big)`
Where :math:`A_i` and :math:`B_j` are representation vectors, and d is a distance measure.
K is calculated using an OpenMP parallel Fortran routine.
:param A: 2D array of representations - shape (N, representation size).
:type A: numpy array
:param B: 2D array of representations - shape (M, representation size).
:type B: numpy array
:param sigma: The value of sigma in the kernel matrix.
:type sigma: float
:param order: The order of the polynomial (0, 1, 2)
:type order: integer
:param metric: The distance metric ('l1', 'l2')
:type metric: string
:return: The Matern kernel matrix - shape (N, M)
:rtype: numpy array
"""
if metric == "l1":
if order == 0:
gammas = []
elif order == 1:
gammas = [1]
sigma /= np.sqrt(3)
elif order == 2:
gammas = [1,1/3.0]
sigma /= np.sqrt(5)
else:
print("Order:%d not implemented in Matern Kernel" % order)
raise SystemExit
return sargan_kernel(A, B, sigma, gammas)
elif metric == "l2":
pass
else:
print("Error: Unknown distance metric %s in Matern kernel" % str(metric))
raise SystemExit
na = A.shape[0]
nb = B.shape[0]
K = np.empty((na, nb), order='F')
# Note: Transposed for Fortran
fmatern_kernel_l2(A.T, na, B.T, nb, K, sigma, order)
return K
def get_local_kernels_gaussian(A, B, na, nb, sigmas):
""" Calculates the Gaussian kernel matrix K, for a local representation where :math:`K_{ij}`:
:math:`K_{ij} = \sum_{a \in i} \sum_{b \in j} \\exp \\big( -\\frac{\\|A_a - B_b\\|_2^2}{2\sigma^2} \\big)`
Where :math:`A_{a}` and :math:`B_{b}` are representation vectors.
Note that the input array is one big 2D array with all atoms concatenated along the same axis.
    Furthermore a series of kernels is produced (since calculating the distance matrix is expensive
    but getting the resulting kernel elements for several sigmas is not.)
K is calculated using an OpenMP parallel Fortran routine.
:param A: 2D array of descriptors - shape (total atoms A, representation size).
:type A: numpy array
:param B: 2D array of descriptors - shape (total atoms B, representation size).
:type B: numpy array
:param na: 1D array containing numbers of atoms in each compound.
:type na: numpy array
:param nb: 1D array containing numbers of atoms in each compound.
:type nb: numpy array
    :param sigmas: List of the sigmas.
    :type sigmas: list
:return: The Gaussian kernel matrix - shape (nsigmas, N, M)
:rtype: numpy array
"""
assert np.sum(na) == A.shape[0], "Error in A input"
assert np.sum(nb) == B.shape[0], "Error in B input"
assert A.shape[1] == B.shape[1], "Error in representation sizes"
nma = len(na)
nmb = len(nb)
sigmas = np.asarray(sigmas)
nsigmas = len(sigmas)
return fget_local_kernels_gaussian(A.T, B.T, na, nb, sigmas, nma, nmb, nsigmas)
def get_local_kernels_laplacian(A, B, na, nb, sigmas):
""" Calculates the Local Laplacian kernel matrix K, for a local representation where :math:`K_{ij}`:
:math:`K_{ij} = \sum_{a \in i} \sum_{b \in j} \\exp \\big( -\\frac{\\|A_a - B_b\\|_1}{\sigma} \\big)`
Where :math:`A_{a}` and :math:`B_{b}` are representation vectors.
Note that the input array is one big 2D array with all atoms concatenated along the same axis.
    Furthermore a series of kernels is produced (since calculating the distance matrix is expensive
    but getting the resulting kernel elements for several sigmas is not.)
K is calculated using an OpenMP parallel Fortran routine.
:param A: 2D array of descriptors - shape (N, representation size).
:type A: numpy array
:param B: 2D array of descriptors - shape (M, representation size).
:type B: numpy array
:param na: 1D array containing numbers of atoms in each compound.
:type na: numpy array
:param nb: 1D array containing numbers of atoms in each compound.
:type nb: numpy array
:param sigmas: List of the sigmas.
:type sigmas: list
:return: The Laplacian kernel matrix - shape (nsigmas, N, M)
:rtype: numpy array
"""
assert np.sum(na) == A.shape[0], "Error in A input"
assert np.sum(nb) == B.shape[0], "Error in B input"
assert A.shape[1] == B.shape[1], "Error in representation sizes"
nma = len(na)
nmb = len(nb)
sigmas = np.asarray(sigmas)
nsigmas = len(sigmas)
return fget_local_kernels_laplacian(A.T, B.T, na, nb, sigmas, nma, nmb, nsigmas)
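# Sketch only: a pure-numpy reference for the Gaussian kernel documented above.
# It is not part of the public API and simply spells out the formula; it should
# agree with gaussian_kernel() up to floating point (the Fortran routine
# remains the fast path).
def gaussian_kernel_numpy_reference(A, B, sigma):
    d2 = np.sum(A**2, axis=1)[:, None] + np.sum(B**2, axis=1)[None, :] \
        - 2.0 * np.dot(A, B.T)
    # Clamp tiny negative values caused by floating-point cancellation.
    d2 = np.maximum(d2, 0.0)
    return np.exp(-d2 / (2.0 * sigma**2))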
|
qmlcode/qml
|
qml/kernels.py
|
Python
|
mit
| 10,752
|
[
"Gaussian"
] |
fd32dbaa217ed20608dd763ac9953a20c50ff57fa817bbda20b703446d14f23a
|
"""
General utility module.
"""
# Copyright (c) 2017 Ben Zimmer. All rights reserved.
import pickle
import dill
import numpy as np
from sklearn.neighbors import KernelDensity
dill.settings["recurse"] = True
def load(input_filename):
"""unpickle an object from a file"""
with open(input_filename, "rb") as input_file:
res = pickle.load(input_file)
return res
def save(obj, output_filename):
"""pickle an object to a file"""
with open(output_filename, "wb") as output_file:
pickle.dump(obj, output_file)
def load_dill(input_filename):
"""undill an object from a file"""
with open(input_filename, "rb") as input_file:
res = dill.load(input_file)
return res
def save_dill(obj, output_filename):
"""dill an object to a file"""
with open(output_filename, "wb") as output_file:
dill.dump(obj, output_file)
def patch_image(bmps, width=16, height=16):
"""combine equally sized smaller images into a larger image"""
if not bmps:
return np.zeros((16, 16), dtype=np.uint8)
# TODO: get rid of default values for width and height
patch_height = bmps[0].shape[0]
patch_width = bmps[0].shape[1]
if len(bmps[0].shape) == 2:
grayscale = True
else:
grayscale = False
res = np.zeros(
(height * patch_height, width * patch_width, 3),
dtype=np.uint8)
for idx in range(min(len(bmps), width * height)):
col = (idx % width) * patch_width
row = int(idx / width) * patch_height
bmp = bmps[idx]
if grayscale:
bmp = np.expand_dims(bmp, 2).repeat(3, 2)
        res[row:(row + patch_height), col:(col + patch_width), :] = bmp
return res
def find_peak_idxs(data, data_range, bandwidth, visualize=False):
"""find locations of peaks in a KDE"""
# build 1D KDE of r values
kde = KernelDensity(kernel="gaussian", bandwidth=bandwidth).fit(
data.reshape(-1, 1))
log_density = kde.score_samples(
data_range.reshape(-1, 1))
density = np.exp(log_density)
# find peaks in density function
d_density = np.diff(density)
peak_idxs = [idx + 1 for idx, x in enumerate(zip(d_density[:-1], d_density[1:]))
if x[0] >= 0.0 and x[1] < 0.0]
if len(peak_idxs) == 0:
peak_idxs = [np.argmax(density)]
if visualize:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(data_range, density, color="blue")
plt.plot(data_range[:-1], d_density, color="red")
for peak_idx in peak_idxs:
plt.axvline(x=data_range[peak_idx], color="green")
plt.grid(True)
plt.show(block=False)
return peak_idxs, [density[idx] for idx in peak_idxs]
def mbs(arrays):
"""find the approximate size of a list of numpy arrays in MiB"""
total = 0.0
for array in arrays:
total += array.nbytes / 1048576.0
return np.round(total, 3)
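# Minimal usage sketch for find_peak_idxs (synthetic data; the bandwidth and
# grid below are illustrative choices, not values used elsewhere):
def _example_find_peak_idxs():
    data = np.concatenate([np.random.normal(-2.0, 0.3, 200),
                           np.random.normal(2.0, 0.3, 200)])
    grid = np.linspace(-4.0, 4.0, 400)
    idxs, densities = find_peak_idxs(data, grid, bandwidth=0.3)
    # Expect one peak near -2 and one near +2.
    return [grid[i] for i in idxs], densities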
|
bdzimmer/handwriting
|
handwriting/util.py
|
Python
|
bsd-3-clause
| 2,953
|
[
"Gaussian"
] |
2a0699f67f7533555181c8fe6fca73aba36459dfca4edd1535581eeb5915b1b0
|
# Copyright 2008-2014 Jaap Karssenberg <jaap.karssenberg@gmail.com>
'''This module defines the ExportTemplateContext, which is a dictionary
used to set the template parameters when exporting.
Export template parameters supported::
generator
.name -- "Zim x.xx"
.user
title
navigation - links to other export pages (if not included here)
home
up
prev -- prev export file or None
next -- next export file or None
links -- links to other export pages (index & plugins / ...) - sorted dict to have Index, Home first followed by plugins
link
.name
.basename
pages -- iter over special + content
.special -- iter special pages to be included (index / plugins / ...) - support get() as well here
.content -- iter pages being exported
page
.title -- heading or basename
.name / .section / .basename
.heading
.body -- full body minus first heading
.content -- heading + body
.headings(max_level) -- iter over headings
headingsection
.level
.heading
.body
.content
.links
.backlinks
.attachments
file
.basename
.mtime
.size
options -- dict with template options (for format)
toc([page]) -- iter of headings in this page or all of pages
index([section]) -- index of full export job, not just in this page
uri(link|file)
resource(file)
anchor(page|section)
From template base::
range() / len() / sorted() / reversed()
strftime()
strfcal()
Test in a template for single page export use: "IF loop.first and loop.last"
'''
import os
from functools import partial
import logging
logger = logging.getLogger('zim.export')
from zim import __version__ as ZIM_VERSION
import zim.datetimetz as datetime
from zim.utils import DefinitionOrderedDict
from zim.fs import format_file_size
from zim.notebook import Path, LINK_DIR_BACKWARD, LINK_DIR_FORWARD
from zim.formats import ParseTree, ParseTreeBuilder, Visitor, \
FORMATTEDTEXT, BULLETLIST, LISTITEM, STRONG, LINK, HEADING
from zim.templates import TemplateContextDict
from zim.templates.functions import ExpressionFunction
from zim.newfs import FileNotFoundError
from zim.notebook.index import IndexNotFoundError
from zim.notebook import Path
class ExportTemplateContext(dict):
# No need to inherit from TemplateContextDict here, the template
# will do a copy first anyway to protect changing content in this
# object. This means functions and proxies can assume this dict is
	# safe, and only "options" is unsafe input.
#
# This object is not intended for re-use -- just instantiate a
# new one for each export page
def __init__(self, notebook, linker_factory, dumper_factory,
title, content, special=None,
home=None, up=None, prevpage=None, nextpage=None,
links=None,
index_generator=None, index_page=None,
):
'''Constructor
When exporting one notebook page per export page ("multi file"),
'C{content}' is a list of one page everytime. Even for exporting
special pages, they go into 'C{content}' one at a time.
The special pages are linked in 'C{links}' so the template can
refer to them.
When exporting multiple notebook pages to a single export page
("single file"), 'C{content}' is a list of all notebook pages a
nd 'C{special}' a list.
@param notebook: L{Notebook} object
@param linker_factory: function producing L{ExportLinker} objects
@param dumper_factory: function producing L{DumperClass} objects
@param title: the export page title
@param content: list of notebook pages to be exported
@param special: list of special notebook pages to be exported if any
@param home: link to home page if any
@param up: link to parent export page if any
@param prevpage: link to previous export page if any
@param nextpage: link to next export page if any
@param links: list of links to special pages if any, links are
given as a 2-tuple of a key and a target (either a L{Path} or
a L{NotebookPathProxy})
		@param index_generator: a generator function that
		provides L{Path} or L{Page} objects to be used for
		the C{index()} function. This method should take a single
argument for the root namespace to show.
See the definition of L{Index.walk()} or L{PageSelection.index()}.
@param index_page: the current page to show in the index if any
'''
# TODO get rid of need of notebook here!
template_options = TemplateContextDict({}) # can be modified by template
self._content = content
self._linker_factory = linker_factory
self._dumper_factory = partial(dumper_factory, template_options=template_options)
self._index_generator = index_generator or content
self._index_page = index_page
self.linker = linker_factory()
def _link(l):
if isinstance(l, str):
return UriProxy(l)
elif isinstance(l, Path):
return NotebookPathProxy(l)
else:
assert l is None or isinstance(l, (NotebookPathProxy, FileProxy))
return l
if special:
pages = ExportTemplatePageIter(
special=PageListProxy(notebook, special, self._dumper_factory, self._linker_factory),
content=PageListProxy(notebook, content, self._dumper_factory, self._linker_factory)
)
else:
pages = ExportTemplatePageIter(
content=PageListProxy(notebook, content, self._dumper_factory, self._linker_factory)
)
self.update({
# Parameters
'generator': {
'name': 'Zim %s' % ZIM_VERSION,
'user': os.environ['USER'], # TODO allow user name in prefs ?
},
'title': title,
'navigation': {
'home': _link(home),
'up': _link(up),
'prev': _link(prevpage),
'next': _link(nextpage),
},
'links': DefinitionOrderedDict(), # keep order of links for iteration
'pages': pages,
# Template settings
'options': template_options, # can be modified by template
# Functions
#~ 'toc': self.toc_function,
'index': self.index_function,
'pageindex': self.index_function, # backward compatibility
'uri': self.uri_function,
'anchor': self.anchor_function,
'resource': self.resource_function,
})
if links:
for k, l in list(links.items()):
l = _link(l)
self['links'][k] = l
def get_dumper(self, page):
'''Returns a L{DumperClass} instance for source page C{page}
Only template options defined before this method is called are
included, so only construct the "dumper" when you are about to
use it
'''
linker = self._linker_factory(source=page)
return self._dumper_factory(linker)
#~ @ExpressionFunction
#~ def toc_function(self):
#~ # TODO
#~ # needs way to link heading achors in exported code (html)
#~ # pass these anchors through the parse tree
#~
#~ builder = ParseTreeBuilder()
#~ builder.start(FORMATTEDTEXT)
#~ builder.start(BULLETLIST)
#~ for page in self._content:
#~ current = 1
#~ for level, heading in ...:
#~ if level > current:
#~ for range(current, level):
#~ builder.start(BULLETLIST)
#~ current = level
#~ elif level < current:
#~ for range(level, current):
#~ builder.end(BULLETLIST)
#~ current = level
#~ builder.start(LISTITEM)
#~ builder.append(LINK, {'href': ...}, anchor)
#~ builder.end(LISTITEM)
#~ for range(1, current):
#~ builder.end(BULLETLIST)
#~
#~ builder.end(BULLETLIST)
#~ builder.end(FORMATTEDTEXT)
#~ tree = builder.get_parsetree()
#~ if not tree:
#~ return ''
#~ print("!!!", tree.tostring())
#~ dumper = self.get_dumper(None)
#~ return ''.join(dumper.dump(tree))
@ExpressionFunction
def index_function(self, namespace=None, collapse=True, ignore_empty=True):
'''Index function for export template
@param namespace: the namespace to include
@param collapse: if C{True} only the branch of the current page
is shown, if C{False} the whole index is shown
@param ignore_empty: if C{True} empty pages (placeholders) are
not shown in the index
'''
if not self._index_generator:
return ''
builder = ParseTreeBuilder()
builder.start(FORMATTEDTEXT)
if self._index_page:
expanded = [self._index_page] + list(self._index_page.parents())
else:
expanded = []
stack = []
if isinstance(namespace, PageProxy):
namespace = Path(namespace.name)
elif isinstance(namespace, str):
namespace = Path(namespace)
for path in self._index_generator(namespace):
logger.info(path)
if self._index_page and collapse \
and not path.parent in expanded:
continue # skip since it is not part of current path
#elif ignore_empty and not (path.hascontent or path.haschildren): - bug, should be page.hascontent, page.haschildren
# continue # skip since page is empty
if not stack:
stack.append(path.parent)
builder.start(BULLETLIST)
elif stack[-1] != path.parent:
if path.ischild(stack[-1]):
builder.start(BULLETLIST)
stack.append(path.parent)
else:
while stack and stack[-1] != path.parent:
builder.end(BULLETLIST)
stack.pop()
builder.start(LISTITEM)
if path == self._index_page:
# Current page is marked with the strong style
builder.append(STRONG, text=path.basename)
else:
# links to other pages
builder.append(LINK,
{'type': 'page', 'href': ':' + path.name},
path.basename)
builder.end(LISTITEM)
for p in stack:
builder.end(BULLETLIST)
builder.end(FORMATTEDTEXT)
tree = builder.get_parsetree()
if not tree:
return ''
#~ print("!!!", tree.tostring())
dumper = self.get_dumper(None)
return ''.join(dumper.dump(tree))
@ExpressionFunction
def uri_function(self, link):
if isinstance(link, UriProxy):
return link.uri
elif isinstance(link, NotebookPathProxy):
return self.linker.page_object(link._path)
elif isinstance(link, FilePathProxy):
file = link._dest_file or link._file
return self.linker.file_object(file)
elif isinstance(link, str):
return self.linker.link(link)
else:
return None
@ExpressionFunction
def anchor_function(self, page):
# TODO remove prefix from anchors?
if isinstance(page, (PageProxy, NotebookPathProxy)):
return page.name
else:
return page
@ExpressionFunction
def resource_function(self, link):
return self.linker.resource(link)
class ExportTemplatePageIter(object):
def __init__(self, special=None, content=None):
self.special = special or []
self.content = content or []
def __iter__(self):
for p in self.special:
yield p
for p in self.content:
yield p
class HeadingSplitter(Visitor):
def __init__(self, max_level=None):
self.max_level = max_level or 999
self._builder = ParseTreeBuilder()
self.headings = []
def _split(self):
self._builder.end(FORMATTEDTEXT)
tree = self._builder.get_parsetree()
if tree.hascontent:
self.headings.append(tree)
self._builder = ParseTreeBuilder()
self._builder.start(FORMATTEDTEXT)
def _close(self):
tree = self._builder.get_parsetree()
if tree.hascontent:
self.headings.append(tree)
def start(self, tag, attrib=None):
if tag is HEADING and int(attrib['level']) <= self.max_level:
self._split()
self._builder.start(tag, attrib)
def end(self, tag):
self._builder.end(tag)
if tag == FORMATTEDTEXT:
self._close()
def text(self, text):
self._builder.text(text)
def append(self, tag, attrib=None, text=None):
if tag is HEADING and int(attrib['level']) <= self.max_level:
self._split()
self._builder.append(tag, attrib, text)
class PageListProxy(object):
def __init__(self, notebook, iterable, dumper_factory, linker_factory):
self._notebook = notebook
self._iterable = iterable
self._dumper_factory = dumper_factory
self._linker_factory = linker_factory
def __iter__(self):
for page in self._iterable:
linker = self._linker_factory(source=page)
dumper = self._dumper_factory(linker)
yield PageProxy(self._notebook, page, dumper, linker)
class ParseTreeProxy(object):
@property
def meta(self):
if self._tree:
return self._tree.meta or {}
else:
return {}
@property
def heading(self):
head, body = self._split_head()
return head
@property
def body(self):
try:
head, body = self._split_head()
if body:
lines = self._dumper.dump(body)
return ''.join(lines)
else:
return ''
except:
logger.exception('Exception exporting page: %s', self._page.name)
raise # will result in a "no such parameter" kind of error
@property
def content(self):
try:
if self._tree:
lines = self._dumper.dump(self._tree)
return ''.join(lines)
else:
return ''
except:
logger.exception('Exception exporting page: %s', self._page.name)
raise # will result in a "no such parameter" kind of error
def _split_head(self):
if not hasattr(self, '_severed_head'):
if self._tree:
tree = self._tree.copy()
head = tree.get_heading_text()
tree.remove_heading()
self._severed_head = (head, tree)
else:
self._severed_head = (None, None)
return self._severed_head
class PageProxy(ParseTreeProxy):
def __init__(self, notebook, page, dumper, linker):
self._notebook = notebook
self._page = page
self._tree = page.get_parsetree()
self._dumper = dumper
self._linker = linker
self.name = self._page.name
self.section = self._page.namespace
self.namespace = self._page.namespace # backward compat
self.basename = self._page.basename
self.properties = {} # undocumented field kept for backward compat
@property
def title(self):
return self.heading or self.basename
@ExpressionFunction
def headings(self, max_level=None):
if self._tree and self._tree.hascontent:
splitter = HeadingSplitter(max_level)
self._tree.visit(splitter)
for subtree in splitter.headings:
yield HeadingProxy(self._page, subtree, self._dumper)
@property
def links(self):
try:
links = self._notebook.links.list_links(self._page, LINK_DIR_FORWARD)
for link in links:
yield NotebookPathProxy(link.target)
except IndexNotFoundError:
pass # XXX needed for index_page and other specials because they do not exist in the index
@property
def backlinks(self):
try:
links = self._notebook.links.list_links(self._page, LINK_DIR_BACKWARD)
for link in links:
yield NotebookPathProxy(link.source)
except IndexNotFoundError:
pass # XXX needed for index_page and other specials because they do not exist in the index
@property
def attachments(self):
try:
source_dir = self._notebook.get_attachments_dir(self._page)
try:
for file in source_dir.list_files():
if file.exists(): # is file
href = './' + file.basename
dest_file = self._linker.resolve_dest_file(href)
yield FileProxy(file, dest_file=dest_file, relpath=href)
except FileNotFoundError:
pass
except IndexNotFoundError:
pass # XXX needed for index_page and other specials because they do not exist in the index
class HeadingProxy(ParseTreeProxy):
def __init__(self, page, tree, dumper):
self._page = page
self._tree = tree
self._dumper = dumper
self.level = tree.get_heading_level() or 1
class FilePathProxy(object):
def __init__(self, file, dest_file=None, relpath=None):
self._file = file
self._dest_file = dest_file
self.name = relpath or file.basename
self.basename = file.basename
class FileProxy(FilePathProxy):
@property
def mtime(self):
return datetime.datetime.fromtimestamp(float(self._file.mtime()))
@property
def size(self):
return format_file_size(self._file.size())
class NotebookPathProxy(object):
def __init__(self, path):
self._path = path
self.name = path.name
self.basename = path.basename
self.section = path.namespace
self.namespace = path.namespace # backward compat
class UriProxy(object):
def __init__(self, uri):
self.uri = uri
def __str__(self):
return self.uri
|
jaap-karssenberg/zim-desktop-wiki
|
zim/export/template.py
|
Python
|
gpl-2.0
| 15,774
|
[
"VisIt"
] |
73f515117689e5a747c9780cafbd1d271506ad9b810d0ba104366660cf7b28c0
|
#!/usr/bin/env python
# Script to rename atoms in PDB based on 'original' MOE fit conformation,
# with coordinates substituted for 'new' conformation
# Uses Parmed to print PDB in Amber compatible format
# Usage: rename_pdb.py old_file new_file
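# Illustrative example (not part of the original script; file names are hypothetical):
#   python rename_pdb.py ligand_moe_fit.pdb ligand_new_conf.pdb
# would write Renamed_ligand_new_conf.pdb using the atom naming of the first PDB
# and the coordinates taken from the second.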
import parmed as pmd
import sys
old = pmd.load_file(sys.argv[1])
new = pmd.load_file(sys.argv[2])
old.write_pdb("Renamed_"+sys.argv[2],coordinates=new.coordinates)
|
rtb1c13/scripts
|
General/rename_pdb.py
|
Python
|
gpl-2.0
| 413
|
[
"Amber",
"MOE"
] |
bb788f0ea37a98c5e5dd12330ae9609af11231f0978838b4b603fa8df234d300
|
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base models for point-cloud based detection."""
from lingvo import compat as tf
from lingvo.core import metrics
from lingvo.core import py_utils
from lingvo.tasks.car import base_decoder
from lingvo.tasks.car import detection_3d_metrics
from lingvo.tasks.car import transform_util
from lingvo.tasks.car.waymo import waymo_ap_metric
from lingvo.tasks.car.waymo import waymo_metadata
import numpy as np
class WaymoOpenDatasetDecoder(base_decoder.BaseDecoder):
"""A decoder to use for decoding a detector model on Waymo."""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'draw_visualizations', False, 'Boolean for whether to draw '
'visualizations. This is independent of laser_sampling_rate.')
p.ap_metric = waymo_ap_metric.WaymoAPMetrics.Params(
waymo_metadata.WaymoMetadata())
p.Define(
'extra_ap_metrics', {},
        'Dictionary of extra AP metrics to run in the decoder. The key '
'is the name of the metric and the value is a sub-class of '
'APMetric')
p.Define(
'save_residuals', False,
'If True, this expects the residuals and ground-truth to be available '
'in the decoder output dictionary, and it will save it to the decoder '
'output file. See decode_include_residuals in PointDetectorBase '
'for details.')
return p
def CreateDecoderMetrics(self):
"""Decoder metrics for WaymoOpenDataset."""
p = self.params
waymo_metric_p = p.ap_metric.Copy().Set(cls=waymo_ap_metric.WaymoAPMetrics)
waymo_metrics = waymo_metric_p.Instantiate()
class_names = waymo_metrics.metadata.ClassNames()
# TODO(bencaine,vrv): There's some code smell with this ap_metrics params
# usage. We create local copies of the params to then instantiate them.
# Failing to do this risks users editing the params after construction of
# the object, making each object method call have the potential for side
# effects.
# Create a new dictionary with copies of the params converted to objects
# so we can then add these to the decoder metrics.
extra_ap_metrics = {}
for k, metric_p in p.extra_ap_metrics.items():
extra_ap_metrics[k] = metric_p.Instantiate()
waymo_metric_bev_p = waymo_metric_p.Copy()
waymo_metric_bev_p.box_type = '2d'
waymo_metrics_bev = waymo_metric_bev_p.Instantiate()
# Convert the list of class names to a dictionary mapping class_id -> name.
class_id_to_name = dict(enumerate(class_names))
# TODO(vrv): This uses the same top down transform as for KITTI;
# re-visit these settings since detections can happen all around
# the car.
top_down_transform = transform_util.MakeCarToImageTransform(
pixels_per_meter=32.,
image_ref_x=512.,
image_ref_y=1408.,
flip_axes=True)
decoder_metrics = py_utils.NestedMap({
'top_down_visualization':
(detection_3d_metrics.TopDownVisualizationMetric(
top_down_transform,
image_height=1536,
image_width=1024,
class_id_to_name=class_id_to_name)),
'num_samples_in_batch': metrics.AverageMetric(),
'waymo_metrics': waymo_metrics,
'waymo_metrics_bev': waymo_metrics_bev,
})
self._update_metrics_class_keys = ['waymo_metrics_bev', 'waymo_metrics']
for k, metric in extra_ap_metrics.items():
decoder_metrics[k] = metric
self._update_metrics_class_keys.append(k)
decoder_metrics.mesh = detection_3d_metrics.WorldViewer()
return decoder_metrics
def ProcessOutputs(self, input_batch, model_outputs):
"""Produce additional decoder outputs for WaymoOpenDataset.
Args:
input_batch: A .NestedMap of the inputs to the model.
model_outputs: A .NestedMap of the outputs of the model, including::
- per_class_predicted_bboxes: [batch, num_classes, num_boxes, 7] float
Tensor with per class 3D (7 DOF) bounding boxes.
- per_class_predicted_bbox_scores: [batch, num_classes, num_boxes] float
Tensor with per class, per box scores.
- per_class_valid_mask: [batch, num_classes, num_boxes] masking Tensor
indicating which boxes were still kept after NMS for each class.
Returns:
A NestedMap of additional decoder outputs needed for
PostProcessDecodeOut.
"""
del model_outputs
p = self.params
input_labels = input_batch.labels
input_metadata = input_batch.metadata
source_ids = tf.strings.join([
input_metadata.run_segment,
tf.as_string(input_metadata.run_start_offset)
],
separator='_')
ret = py_utils.NestedMap({
'num_points_in_bboxes': input_batch.labels.bboxes_3d_num_points,
# Ground truth.
'bboxes_3d': input_labels.bboxes_3d,
'bboxes_3d_mask': input_labels.bboxes_3d_mask,
'labels': input_labels.labels,
'label_ids': input_labels.label_ids,
'speed': input_labels.speed,
'acceleration': input_labels.acceleration,
# Fill the following in.
'source_ids': source_ids,
'difficulties': input_labels.single_frame_detection_difficulties,
'unfiltered_bboxes_3d_mask': input_labels.unfiltered_bboxes_3d_mask,
'run_segment': input_metadata.run_segment,
'run_start_offset': input_metadata.run_start_offset,
'pose': input_metadata.pose,
})
if p.draw_visualizations:
laser_sample = self._SampleLaserForVisualization(
input_batch.lasers.points_xyz, input_batch.lasers.points_padding)
ret.update(laser_sample)
return ret
def PostProcessDecodeOut(self, dec_out_dict, dec_metrics_dict):
"""Post-processes the decoder outputs."""
p = self.params
# Update num_samples_in_batch.
batch_size, num_classes, num_boxes, _ = (
dec_out_dict.per_class_predicted_bboxes.shape)
dec_metrics_dict.num_samples_in_batch.Update(batch_size)
# Update decoder output by removing z-coordinate, thus reshaping the bboxes
# to [batch, num_bboxes, 5] to be compatible with
# TopDownVisualizationMetric.
# Indices corresponding to the 2D bbox parameters (x, y, dx, dy, phi).
    bbox_2d_idx = np.asarray([1, 1, 0, 1, 1, 0, 1], dtype=bool)
bboxes_2d = dec_out_dict.bboxes_3d[..., bbox_2d_idx]
predicted_bboxes = dec_out_dict.per_class_predicted_bboxes[..., bbox_2d_idx]
if p.draw_visualizations and dec_out_dict.points_sampled:
tf.logging.info('Updating sample for top down visualization')
dec_metrics_dict.mesh.Update(
py_utils.NestedMap({
'points_xyz': dec_out_dict.points_xyz,
'points_padding': dec_out_dict.points_padding,
}))
# Flatten our predictions/scores to match the API of the visualization
# The last dimension of flattened_bboxes is 5 due to the mask
# above using bbox_2d_idx.
flattened_bboxes = np.reshape(predicted_bboxes,
[batch_size, num_classes * num_boxes, 5])
flattened_visualization_weights = np.reshape(
dec_out_dict.visualization_weights,
[batch_size, num_classes * num_boxes])
# Create a label id mask for now to maintain compatibility.
# TODO(bencaine): Refactor visualizations to reflect new structure.
flattened_visualization_labels = np.tile(
np.arange(0, num_classes)[np.newaxis, :, np.newaxis],
[batch_size, 1, num_boxes])
flattened_visualization_labels = np.reshape(
flattened_visualization_labels, [batch_size, num_classes * num_boxes])
dec_metrics_dict.top_down_visualization.Update(
py_utils.NestedMap({
'visualization_labels': flattened_visualization_labels,
'predicted_bboxes': flattened_bboxes,
'visualization_weights': flattened_visualization_weights,
'points_xyz': dec_out_dict.points_xyz,
'points_padding': dec_out_dict.points_padding,
'gt_bboxes_2d': bboxes_2d,
'gt_bboxes_2d_weights': dec_out_dict.bboxes_3d_mask,
'labels': dec_out_dict.labels,
'difficulties': dec_out_dict.difficulties,
'source_ids': dec_out_dict.source_ids,
}))
# Update AP metrics.
# Skip zeroth step decoding.
if dec_out_dict.global_step == 0:
return None
# TODO(bencaine/vrv): Refactor to unify Waymo code and KITTI
# Returned values are saved in model_dir/decode_* directories.
output_to_save = []
for batch_idx in range(batch_size):
pred_bboxes = dec_out_dict.per_class_predicted_bboxes[batch_idx]
pred_bbox_scores = dec_out_dict.per_class_predicted_bbox_scores[batch_idx]
# The current API expects a 'height' matrix to be passed for filtering
# detections based on height. This is a KITTI-ism that we need to remove,
# but for now we just give a height of 1. The MinHeight metadata function
# for non-KITTI datasets should have a threshold lower than this value.
heights = np.ones((num_classes, num_boxes)).astype(np.float32)
gt_mask = dec_out_dict.bboxes_3d_mask[batch_idx].astype(bool)
gt_labels = dec_out_dict.labels[batch_idx][gt_mask]
gt_bboxes = dec_out_dict.bboxes_3d[batch_idx][gt_mask]
gt_difficulties = dec_out_dict.difficulties[batch_idx][gt_mask]
gt_num_points = dec_out_dict.num_points_in_bboxes[batch_idx][gt_mask]
# Note that this is not used in the KITTI evaluation.
gt_speed = dec_out_dict.speed[batch_idx][gt_mask]
# TODO(shlens): Update me
for metric_key in self._update_metrics_class_keys:
metric_cls = dec_metrics_dict[metric_key]
metric_cls.Update(
dec_out_dict.source_ids[batch_idx],
py_utils.NestedMap(
groundtruth_labels=gt_labels,
groundtruth_bboxes=gt_bboxes,
groundtruth_difficulties=gt_difficulties,
groundtruth_num_points=gt_num_points,
groundtruth_speed=gt_speed,
detection_scores=pred_bbox_scores,
detection_boxes=pred_bboxes,
detection_heights_in_pixels=heights,
))
# We still want to save all ground truth (even if it was filtered
# in some way) so we use the unfiltered_bboxes_3d_mask here.
gt_save_mask = dec_out_dict.unfiltered_bboxes_3d_mask[batch_idx].astype(
bool)
pd_save_mask = dec_out_dict.per_class_valid_mask[batch_idx] > 0
class_ids = np.tile(np.arange(num_classes)[:, np.newaxis], [1, num_boxes])
saved_results = py_utils.NestedMap(
pose=dec_out_dict.pose[batch_idx],
frame_id=dec_out_dict.source_ids[batch_idx],
bboxes=pred_bboxes[pd_save_mask],
scores=pred_bbox_scores[pd_save_mask],
gt_labels=dec_out_dict.labels[batch_idx][gt_save_mask],
gt_label_ids=dec_out_dict.label_ids[batch_idx][gt_save_mask],
gt_speed=dec_out_dict.speed[batch_idx][gt_save_mask],
gt_acceleration=dec_out_dict.acceleration[batch_idx][gt_save_mask],
class_ids=class_ids[pd_save_mask],
gt_bboxes=dec_out_dict.bboxes_3d[batch_idx][gt_save_mask],
gt_difficulties=dec_out_dict.difficulties[batch_idx][gt_save_mask],
)
if p.save_residuals:
# The leading shapes of these tensors should match bboxes and scores.
        # These are the underlying tensors that are used to compute score
# and bboxes.
saved_results.update({
'bboxes_gt_residuals':
dec_out_dict.per_class_gt_residuals[batch_idx][pd_save_mask],
'bboxes_gt_labels':
dec_out_dict.per_class_gt_labels[batch_idx][pd_save_mask],
'bboxes_residuals':
dec_out_dict.per_class_residuals[batch_idx][pd_save_mask],
'bboxes_logits':
dec_out_dict.per_class_logits[batch_idx][pd_save_mask],
'bboxes_anchor_boxes':
dec_out_dict.per_class_anchor_boxes[batch_idx][pd_save_mask],
})
serialized = self.SaveTensors(saved_results)
output_to_save += [(dec_out_dict.source_ids[batch_idx], serialized)]
return output_to_save
|
tensorflow/lingvo
|
lingvo/tasks/car/waymo/waymo_decoder.py
|
Python
|
apache-2.0
| 13,038
|
[
"VisIt"
] |
d10ba2d0db4d78daa2bc077d39ce96bfdb10b5d767ac150663b63ce1a9e718ff
|
# -*-python-*-
#
# Copyright (C) 1999-2006 The ViewCVS Group. All Rights Reserved.
#
# By using this file, you agree to the terms and conditions set forth in
# the LICENSE.html file which can be found at the top level of the ViewVC
# distribution or at http://viewvc.org/license-1.html.
#
# For more information, visit http://viewvc.org/
#
# -----------------------------------------------------------------------
#
# ViewVC: View CVS/SVN repositories via a web browser
#
# -----------------------------------------------------------------------
#
# This is a teeny stub to launch the main ViewVC app. It checks the load
# average, then loads the (precompiled) viewvc.py file and runs it.
#
# -----------------------------------------------------------------------
#
#########################################################################
#
# INSTALL-TIME CONFIGURATION
#
# These values will be set during the installation process. During
# development, they will remain None.
#
LIBRARY_DIR = None
CONF_PATHNAME = None
#########################################################################
#
# Adjust sys.path to include our library directory
#
import sys
if LIBRARY_DIR:
sys.path.insert(0, LIBRARY_DIR)
import sapi
import viewvc
import query
reload(query) # need reload because initial import loads this stub file
cfg = viewvc.load_config(CONF_PATHNAME)
def index(req):
server = sapi.ModPythonServer(req)
try:
query.main(server, cfg, "viewvc.py")
finally:
server.close()
|
foresthz/fusion5.1
|
www/scm/viewvc/bin/mod_python/query.py
|
Python
|
gpl-2.0
| 1,502
|
[
"VisIt"
] |
df96896b82c7a9c5226b373e2ef1eac337d7f4acfc79c3de1df3df0ab4e23d02
|
# Monte Carlo policy evaluation
# Every-visit
# Uncertain state transition
import numpy as np
from grid_world import standard_grid, negative_grid
from dp_ipe_dst_dp import print_values, print_policy
SMALL_ENOUGH = 1e-4
GAMMA = 0.9
P_ACTION = 0.6
def affected_action(grid, s, a):
p = np.random.random()
if p < P_ACTION or len(grid.actions[s]) == 1:
return a
else:
tmp = list(grid.actions[s])
tmp.remove(a)
return np.random.choice(tmp)
def play_one_episode(grid, policy):
valid_states = list(grid.actions.keys())
start_state_index = np.random.choice(len(valid_states))
s = grid.set_state(valid_states[start_state_index])
states_and_rewards = [(s, 0)]
while not grid.game_over():
a_desired = policy[s]
a = affected_action(grid, s, a_desired)
r = grid.move(a)
s = grid.current_state()
states_and_rewards.append((s, r))
G = 0
states_and_returns = []
first = True
for s, r in reversed(states_and_rewards):
if first:
first = False
else:
states_and_returns.append((s, G))
G = r + GAMMA * G
states_and_returns.reverse()
return states_and_returns
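# Illustrative note (not part of the original script): play_one_episode pairs each
# visited state with the discounted return that follows it by walking the episode
# backwards. A minimal worked example, assuming GAMMA = 0.9 and a hypothetical
# trajectory states_and_rewards = [(s0, 0), (s1, 0), (s2, 1)] with s2 terminal:
#   reversed pass: s2 is skipped (terminal state), G = 1 + 0.9*0 = 1.0
#                  s1 -> append (s1, 1.0),        G = 0 + 0.9*1.0 = 0.9
#                  s0 -> append (s0, 0.9)
#   states_and_returns (after reverse()) == [(s0, 0.9), (s1, 1.0)]
# Since this is every-visit MC, repeated visits to the same state in one episode
# would each contribute their own return to returns[s] in the loop under __main__.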
if __name__ == '__main__':
grid = standard_grid()
print("Rewards:")
print_values(grid.rewards, grid)
# ----------------
# |R |R |R |R |f+|
# ----------------
# |D |xx|R |R |f-|
# ----------------
# |R |R |U |xx|U |
# ----------------
# |U |xx|U |R |U |
# ----------------
policy = {
(0, 0): ('R'),
(1, 0): ('R'),
(2, 0): ('R'),
(3, 0): ('R'),
(0, 1): ('D'),
(2, 1): ('R'),
(3, 1): ('R'),
(0, 2): ('R'),
(1, 2): ('R'),
(2, 2): ('U'),
(4, 2): ('U'),
(0, 3): ('U'),
(2, 3): ('U'),
(3, 3): ('R'),
(4, 3): ('U'),
}
print("Fixed Policy:")
print_policy(policy, grid)
V = {}
returns = {}
states = grid.all_states()
for s in states:
if s in grid.actions:
returns[s] = []
else:
V[s] = 0
for i in range(1000):
# print("Value:")
# print_values(V, grid)
# input()
states_and_returns = play_one_episode(grid, policy)
for s, G in states_and_returns:
returns[s].append(G)
V[s] = np.mean(returns[s])
print("Values:")
print_values(V, grid)
|
GitYiheng/reinforcement_learning_test
|
test00_previous_files/mc_pe_ust_ev.py
|
Python
|
mit
| 2,099
|
[
"VisIt"
] |
bd28a4fe51c3019637d0d50b0a1dfdcbcdd04365f1e3f981dad9cd0c66e5ccff
|
from optparse import OptionParser
import argparse
import numpy as np
import pandas as pd
import csv
import pysam
import pdb
from bx.intervals.intersection import Interval, IntervalTree
import cluster
import genotyper as gt
from GC_data import GC_data
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--contig", required=True)
parser.add_argument("--output", dest="fn_out", required=True)
parser.add_argument("--gglob_dir", required=True)
parser.add_argument("--regions", dest="fn_regions", required=True)
parser.add_argument("--plot_dir", default="plots")
parser.add_argument("--fn_fa", default="/net/eichler/vol7/home/psudmant/genomes/fastas/hg19_1kg_phase2_reference/human_g1k_v37.fasta", help="reference genome fasta file (Default: %(default)s)")
parser.add_argument("--GC_DTS", dest="fn_GC_DTS", default="/net/eichler/vol7/home/psudmant/genomes/GC_tracks/windowed_DTS/HG19/500_bp_slide_GC", help="GC tracks DTS file (Default: %(default)s")
parser.add_argument("--DTS_contigs", dest='fn_DTS_contigs', default="/net/eichler/vol7/home/psudmant/EEE_Lab/1000G/1000genomesScripts/windowed_analysis/DTS_window_analysis/windows/hg19_slide/500_bp_windows.pkl.contigs", help="Contig sizes file (Default: %(default)s)")
parser.add_argument("--dup_tabix", dest="fn_dup_tabix", default="/net/eichler/vol7/home/psudmant/genomes/annotations/hg19/superdups/superdups.merged.bed.gz", help="Superdups tabix file (Default: %(default)s)")
parser.add_argument("--max_cp", default=12, type=int, help="Maximum cp to consider for GMM. Greater values will be rounded instead of fitted. Default: %(default)s")
parser.add_argument("--header_chr", help="Name of chr to print header for")
parser.add_argument("--data_type", choices=["wssd", "sunk"], help="Type of data to genotype (wssd or sunk)")
parser.add_argument("--genotype_method", choices=["float", "GMM"], help="Output float or integer (Gaussian Mixture Model) genotypes")
parser.add_argument("--subset", default=0)
parser.add_argument("--total_subsets", default=1)
parser.add_argument("--subset_indivs", nargs="+", help="Subset of individuals to genotype")
    parser.add_argument("--manifest", help="Path to manifest file with sample column")
args = parser.parse_args()
# (o, args) = opts.parse_args()
max_cp = int(args.max_cp)
subset = int(args.subset)
total_subsets = int(args.total_subsets)
tbx_dups = pysam.Tabixfile(args.fn_dup_tabix)
GC_inf = GC_data(args.fn_GC_DTS, args.contig, args.fn_DTS_contigs)
if args.subset_indivs is not None:
indivs = args.subset_indivs
elif args.manifest is not None:
indivs = pd.read_table(args.manifest, header=0).sample.unique().tolist()
else:
indivs = list(pd.read_json("%s/gglob.idx" % args.gglob_dir).indivs)
# GENOTYPE TIME!
g = gt.genotyper(args.contig, gglob_dir = args.gglob_dir, plot_dir = args.plot_dir, subset_indivs = indivs, fn_fa=args.fn_fa, dup_tabix = tbx_dups, GC_inf = GC_inf)
regions = pd.read_csv(args.fn_regions, header=None, delimiter="\t", index_col=None)
regions.columns = ["chr", "start", "end", "name"]
regions_by_contig = regions[regions['chr'] == args.contig]
nregions = regions_by_contig.shape[0]
FOUT = open(args.fn_out, 'w')
if args.contig == args.header_chr and subset == 0:
FOUT.write("chr\tstart\tend\tname\t%s\n"%("\t".join(indivs)))
for i, row in regions_by_contig.iterrows():
contig, s, e, name = row['chr'], int(row['start']), int(row['end']), row['name']
if args.data_type == "wssd":
X, idx_s, idx_e = g.get_gt_matrix(contig, s, e)
else:
X, idx_s, idx_e = g.get_sunk_gt_matrix(contig, s, e)
if args.genotype_method == "float":
gt_list = np.mean(X, 1).tolist()
gt_ordered = [gt_list[g.indivs.index(indiv)] for indiv in indivs]
gts = "\t".join(map(str, gt_ordered))
else:
gts_by_indiv = g.simple_GMM_genotype(X, max_cp=max_cp)
gts = "\t".join(["%d"%(gts_by_indiv[i]) for i in indivs])
#gX.simple_plot("%s/%s_%d_%d.pdf"%(args.plot_dir, contig,s,e))
FOUT.write("%s\t%d\t%d\t%s\t%s\n"%(contig, s, e, name, gts))
print i, "%s\t%d\t%d\t%s\t%s\n"%(contig, s, e, name, gts)
|
EichlerLab/read_depth_genotyper
|
scripts/combine_genotypes.py
|
Python
|
mit
| 4,355
|
[
"Gaussian",
"pysam"
] |
356c165c1880b495ec472905e713db082129dbd0c818280eb0670ada972e93f8
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RFdbInfiniummethylationHg19(RPackage):
"""Compiled HumanMethylation27 and HumanMethylation450 annotations."""
# No available git repository
homepage = "https://bioconductor.org/packages/release/data/annotation/html/FDb.InfiniumMethylation.hg19.html"
url = "https://bioconductor.org/packages/release/data/annotation/src/contrib/FDb.InfiniumMethylation.hg19_2.2.0.tar.gz"
version('2.2.0', sha256='605aa3643588a2f40a942fa760b92662060a0dfedb26b4e4cd6f1a78b703093f')
depends_on('r@2.10:', type=('build', 'run'))
depends_on('r-genomicfeatures@1.7.22:', type=('build', 'run'))
depends_on('r-txdb-hsapiens-ucsc-hg19-knowngene', type=('build', 'run'))
depends_on('r-org-hs-eg-db', type=('build', 'run'))
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-biostrings', type=('build', 'run'))
|
iulian787/spack
|
var/spack/repos/builtin/packages/r-fdb-infiniummethylation-hg19/package.py
|
Python
|
lgpl-2.1
| 1,076
|
[
"Bioconductor"
] |
2131e790c6afc88d9588a63f92ec71afcf6b500e7b0c9618679b62460d2f44b2
|
#!/usr/bin/python
#
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performs client tasks for testing IMAP OAuth2 authentication.
To use this script, you'll need to have registered with Google as an OAuth
application and obtained an OAuth client ID and client secret.
See https://developers.google.com/identity/protocols/OAuth2 for instructions on
registering and for documentation of the APIs invoked by this code.
This script has 3 modes of operation.
1. The first mode is used to generate and authorize an OAuth2 token, the
first step in logging in via OAuth2.
oauth2 --user=xxx@gmail.com \
--client_id=1038[...].apps.googleusercontent.com \
--client_secret=VWFn8LIKAMC-MsjBMhJeOplZ \
--generate_oauth2_token
The script will converse with Google and generate an oauth request
token, then present you with a URL you should visit in your browser to
authorize the token. Once you get the verification code from the Google
website, enter it into the script to get your OAuth access token. The output
from this command will contain the access token, a refresh token, and some
metadata about the tokens. The access token can be used until it expires, and
the refresh token lasts indefinitely, so you should record these values for
reuse.
2. The script will generate new access tokens using a refresh token.
oauth2 --user=xxx@gmail.com \
--client_id=1038[...].apps.googleusercontent.com \
--client_secret=VWFn8LIKAMC-MsjBMhJeOplZ \
--refresh_token=1/Yzm6MRy4q1xi7Dx2DuWXNgT6s37OrP_DW_IoyTum4YA
3. The script will generate an OAuth2 string that can be fed
directly to IMAP or SMTP. This is triggered with the --generate_oauth2_string
option.
oauth2 --generate_oauth2_string --user=xxx@gmail.com \
--access_token=ya29.AGy[...]ezLg
The output of this mode will be a base64-encoded string. To use it, connect to a
IMAPFE and pass it as the second argument to the AUTHENTICATE command.
a AUTHENTICATE XOAUTH2 a9sha9sfs[...]9dfja929dk==
"""
import base64
import imaplib
import json
from optparse import OptionParser
import smtplib
import sys
import urllib
def SetupOptionParser():
# Usage message is the module's docstring.
parser = OptionParser(usage=__doc__)
parser.add_option('--generate_oauth2_token',
action='store_true',
dest='generate_oauth2_token',
help='generates an OAuth2 token for testing')
parser.add_option('--generate_oauth2_string',
action='store_true',
dest='generate_oauth2_string',
help='generates an initial client response string for '
'OAuth2')
parser.add_option('--client_id',
default=None,
help='Client ID of the application that is authenticating. '
'See OAuth2 documentation for details.')
parser.add_option('--client_secret',
default=None,
help='Client secret of the application that is '
'authenticating. See OAuth2 documentation for '
'details.')
parser.add_option('--access_token',
default=None,
help='OAuth2 access token')
parser.add_option('--refresh_token',
default=None,
help='OAuth2 refresh token')
parser.add_option('--scope',
default='https://mail.google.com/',
help='scope for the access token. Multiple scopes can be '
'listed separated by spaces with the whole argument '
'quoted.')
parser.add_option('--test_imap_authentication',
action='store_true',
dest='test_imap_authentication',
help='attempts to authenticate to IMAP')
parser.add_option('--test_smtp_authentication',
action='store_true',
dest='test_smtp_authentication',
help='attempts to authenticate to SMTP')
parser.add_option('--user',
default=None,
help='email address of user whose account is being '
'accessed')
return parser
# The URL root for accessing Google Accounts.
GOOGLE_ACCOUNTS_BASE_URL = 'https://accounts.google.com'
# Hardcoded dummy redirect URI for non-web apps.
REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
def AccountsUrl(command):
"""Generates the Google Accounts URL.
Args:
command: The command to execute.
Returns:
A URL for the given command.
"""
return '%s/%s' % (GOOGLE_ACCOUNTS_BASE_URL, command)
def UrlEscape(text):
# See OAUTH 5.1 for a definition of which characters need to be escaped.
return urllib.quote(text, safe='~-._')
def UrlUnescape(text):
# See OAUTH 5.1 for a definition of which characters need to be escaped.
return urllib.unquote(text)
def FormatUrlParams(params):
"""Formats parameters into a URL query string.
Args:
params: A key-value map.
Returns:
A URL query string version of the given parameters.
"""
param_fragments = []
for param in sorted(params.iteritems(), key=lambda x: x[0]):
param_fragments.append('%s=%s' % (param[0], UrlEscape(param[1])))
return '&'.join(param_fragments)
def GeneratePermissionUrl(client_id, scope='https://mail.google.com/'):
"""Generates the URL for authorizing access.
This uses the "OAuth2 for Installed Applications" flow described at
https://developers.google.com/accounts/docs/OAuth2InstalledApp
Args:
client_id: Client ID obtained by registering your app.
scope: scope for access token, e.g. 'https://mail.google.com'
Returns:
A URL that the user should visit in their browser.
"""
params = {}
params['client_id'] = client_id
params['redirect_uri'] = REDIRECT_URI
params['scope'] = scope
params['response_type'] = 'code'
return '%s?%s' % (AccountsUrl('o/oauth2/auth'),
FormatUrlParams(params))
def AuthorizeTokens(client_id, client_secret, authorization_code):
"""Obtains OAuth access token and refresh token.
This uses the application portion of the "OAuth2 for Installed Applications"
flow at https://developers.google.com/accounts/docs/OAuth2InstalledApp#handlingtheresponse
Args:
client_id: Client ID obtained by registering your app.
client_secret: Client secret obtained by registering your app.
authorization_code: code generated by Google Accounts after user grants
permission.
Returns:
The decoded response from the Google Accounts server, as a dict. Expected
fields include 'access_token', 'expires_in', and 'refresh_token'.
"""
params = {}
params['client_id'] = client_id
params['client_secret'] = client_secret
params['code'] = authorization_code
params['redirect_uri'] = REDIRECT_URI
params['grant_type'] = 'authorization_code'
request_url = AccountsUrl('o/oauth2/token')
response = urllib.urlopen(request_url, urllib.urlencode(params)).read()
return json.loads(response)
def RefreshToken(client_id, client_secret, refresh_token):
"""Obtains a new token given a refresh token.
See https://developers.google.com/accounts/docs/OAuth2InstalledApp#refresh
Args:
client_id: Client ID obtained by registering your app.
client_secret: Client secret obtained by registering your app.
refresh_token: A previously-obtained refresh token.
Returns:
The decoded response from the Google Accounts server, as a dict. Expected
fields include 'access_token', 'expires_in', and 'refresh_token'.
"""
params = {}
params['client_id'] = client_id
params['client_secret'] = client_secret
params['refresh_token'] = refresh_token
params['grant_type'] = 'refresh_token'
request_url = AccountsUrl('o/oauth2/token')
response = urllib.urlopen(request_url, urllib.urlencode(params)).read()
return json.loads(response)
def GenerateOAuth2String(username, access_token, base64_encode=True):
"""Generates an IMAP OAuth2 authentication string.
See https://developers.google.com/google-apps/gmail/oauth2_overview
Args:
username: the username (email address) of the account to authenticate
access_token: An OAuth2 access token.
base64_encode: Whether to base64-encode the output.
Returns:
The SASL argument for the OAuth2 mechanism.
"""
auth_string = 'user=%s\1auth=Bearer %s\1\1' % (username, access_token)
if base64_encode:
auth_string = base64.b64encode(auth_string)
return auth_string
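# Illustrative note (not part of the original module): with base64_encode=False the
# function returns the raw SASL XOAUTH2 string; for a hypothetical user and token,
#   GenerateOAuth2String('xxx@gmail.com', 'ya29.EXAMPLE', base64_encode=False)
#   -> 'user=xxx@gmail.com\x01auth=Bearer ya29.EXAMPLE\x01\x01'
# ('\1' in the format string is the 0x01 control character, shown here as \x01).
# The IMAP test helper below passes this unencoded form (imaplib base64-encodes it),
# while the SMTP helper base64-encodes it explicitly before issuing AUTH XOAUTH2.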
def TestImapAuthentication(user, auth_string):
"""Authenticates to IMAP with the given auth_string.
Prints a debug trace of the attempted IMAP connection.
Args:
user: The Gmail username (full email address)
auth_string: A valid OAuth2 string, as returned by GenerateOAuth2String.
Must not be base64-encoded, since imaplib does its own base64-encoding.
"""
print
imap_conn = imaplib.IMAP4_SSL('imap.gmail.com')
imap_conn.debug = 4
imap_conn.authenticate('XOAUTH2', lambda x: auth_string)
imap_conn.select('INBOX')
def TestSmtpAuthentication(user, auth_string):
"""Authenticates to SMTP with the given auth_string.
Args:
user: The Gmail username (full email address)
auth_string: A valid OAuth2 string, not base64-encoded, as returned by
GenerateOAuth2String.
"""
print
smtp_conn = smtplib.SMTP('smtp.gmail.com', 587)
smtp_conn.set_debuglevel(True)
smtp_conn.ehlo('test')
smtp_conn.starttls()
smtp_conn.docmd('AUTH', 'XOAUTH2 ' + base64.b64encode(auth_string))
def RequireOptions(options, *args):
missing = [arg for arg in args if getattr(options, arg) is None]
if missing:
print 'Missing options: %s' % ' '.join(missing)
sys.exit(-1)
def main(argv):
options_parser = SetupOptionParser()
(options, args) = options_parser.parse_args()
if options.refresh_token:
RequireOptions(options, 'client_id', 'client_secret')
response = RefreshToken(options.client_id, options.client_secret,
options.refresh_token)
print 'Access Token: %s' % response['access_token']
print 'Access Token Expiration Seconds: %s' % response['expires_in']
elif options.generate_oauth2_string:
RequireOptions(options, 'user', 'access_token')
print ('OAuth2 argument:\n' +
GenerateOAuth2String(options.user, options.access_token))
elif options.generate_oauth2_token:
RequireOptions(options, 'client_id', 'client_secret')
print 'To authorize token, visit this url and follow the directions:'
print ' %s' % GeneratePermissionUrl(options.client_id, options.scope)
authorization_code = raw_input('Enter verification code: ')
response = AuthorizeTokens(options.client_id, options.client_secret,
authorization_code)
print 'Refresh Token: %s' % response['refresh_token']
print 'Access Token: %s' % response['access_token']
print 'Access Token Expiration Seconds: %s' % response['expires_in']
elif options.test_imap_authentication:
RequireOptions(options, 'user', 'access_token')
TestImapAuthentication(options.user,
GenerateOAuth2String(options.user, options.access_token,
base64_encode=False))
elif options.test_smtp_authentication:
RequireOptions(options, 'user', 'access_token')
TestSmtpAuthentication(options.user,
GenerateOAuth2String(options.user, options.access_token,
base64_encode=False))
else:
options_parser.print_help()
print 'Nothing to do, exiting.'
return
if __name__ == '__main__':
main(sys.argv)
|
tripleee/gmail-oauth2-tools
|
python/oauth2.py
|
Python
|
apache-2.0
| 12,198
|
[
"VisIt"
] |
4b65d694ddc28f786d334c0950cafec02f699877e0f549d93092a3bfe7619b1f
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.misc.fluxes Contains the ObservedImageMaker class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import gc
from collections import defaultdict
# Import the relevant PTS classes and modules
from ..basics.log import log
from ..tools import filesystem as fs
from ..filter.filter import parse_filter
from ...magic.core.kernel import ConvolutionKernel
from ...magic.core.kernel import get_fwhm as get_kernel_fwhm
from ...magic.core.datacube import DataCube
from ...magic.basics.coordinatesystem import CoordinateSystem
from ...magic.core.remote import RemoteDataCube
from ...magic.core import fits
from ..tools.utils import lazyproperty
from ..tools import types
from ..remote.remote import Remote
from ..prep.deploy import Deployer
from ..tools import strings
from ..tools.stringify import tostr
from ..tools import numbers
from .datacubes import DatacubesMiscMaker, get_datacube_instrument_name
from ..basics.range import QuantityRange
from ..tools import sequences
# -----------------------------------------------------------------
default_filter_names = ["FUV", "NUV", "u", "g", "r", "i", "z", "H", "J", "Ks", "I1", "I2", "I3", "I4", "W1", "W2",
"W3", "W4", "Pacs 70", "Pacs 100", "Pacs 160", "SPIRE 250", "SPIRE 350", "SPIRE 500"]
# -----------------------------------------------------------------
class ObservedImageMaker(DatacubesMiscMaker):
"""
This class ...
"""
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param interactive:
:return:
"""
# Call the constructor of the base class
super(ObservedImageMaker, self).__init__(*args, **kwargs)
# -- Attributes --
# Filter names
self.filter_names = default_filter_names
# Output paths
self.output_paths_instruments = None
# The dictionary containing the different SKIRT output datacubes
self.datacubes = dict()
# The dictionary containing the created observation images
self.images = dict()
# The coordinate systems of each instrument
self.coordinate_systems = None
# The FWHMs
self.fwhms = None
# The kernel paths
self.kernel_paths = None
# The PSF FWHMs
self.psf_fwhms = None
# The target unit
self.unit = None
# The host id
self.host_id = None
# Remote options
self.remote_spectral_convolution = False
self.remote_threshold = None
self.remote_npixels_threshold = None
self.remote_rebin_threshold = None
self.remote_convolve_threshold = None
# No spectral convolution for certain filters
self.no_spectral_convolution_filters = []
# The path to the output data cubes
self.paths = defaultdict(dict)
# The rebin coordinate systems
self.rebin_coordinate_systems = None
# -----------------------------------------------------------------
@lazyproperty
def output_path_hash(self):
return strings.hash_string(self.output_path)
# -----------------------------------------------------------------
@lazyproperty
def remote_intermediate_results_path(self):
"""
        This function ...
:return:
"""
dirname = "observedimagemaker_" + self.output_path_hash
dirpath = fs.join(self.remote.pts_temp_path, dirname)
# Create
if self.config.write_intermediate and not self.remote.is_directory(dirpath): self.remote.create_directory(dirpath)
# Return the path
return dirpath
# -----------------------------------------------------------------
@lazyproperty
def intermediate_results_path(self):
"""
This function ...
:return:
"""
return self.output_path_directory("intermediate", create=self.config.write_intermediate)
# -----------------------------------------------------------------
@lazyproperty
def remote_intermediate_initial_path(self):
"""
This function ...
:return:
"""
# Set path
initial_path = fs.join(self.remote_intermediate_results_path, "initial")
# Create?
if self.config.write_intermediate and not self.remote.is_directory(initial_path): self.remote.create_directory(initial_path)
# Return the path
return initial_path
# -----------------------------------------------------------------
@lazyproperty
def intermediate_initial_path(self):
"""
This function ...
:return:
"""
# Set path
initial_path = fs.join(self.intermediate_results_path, "initial")
# Create?
if self.config.write_intermediate and not fs.is_directory(initial_path): fs.create_directory(initial_path)
# Return the path
return initial_path
# -----------------------------------------------------------------
@lazyproperty
def remote_intermediate_rebin_path(self):
"""
This function ...
:return:
"""
# Set path
rebin_path = fs.join(self.remote_intermediate_results_path, "rebin")
# Create?
if self.config.write_intermediate and not self.remote.is_directory(rebin_path): self.remote.create_directory(rebin_path)
# Return the path
return rebin_path
# -----------------------------------------------------------------
@lazyproperty
def intermediate_rebin_path(self):
"""
This function ...
:return:
"""
# Set path
rebin_path = fs.join(self.intermediate_results_path, "rebin")
# Create?
if self.config.write_intermediate and not fs.is_directory(rebin_path): fs.create_directory(rebin_path)
# Return the path
return rebin_path
# -----------------------------------------------------------------
@lazyproperty
def remote_intermediate_convolve_path(self):
"""
This function ...
:return:
"""
# Set path
convolve_path = fs.join(self.remote_intermediate_results_path, "convolve")
# Create?
if self.config.write_intermediate and not self.remote.is_directory(convolve_path): self.remote.create_directory(convolve_path)
# Return the path
return convolve_path
# -----------------------------------------------------------------
@lazyproperty
def intermediate_convolve_path(self):
"""
        This function ...
:return:
"""
# Set path
convolve_path = fs.join(self.intermediate_results_path, "convolve")
# Create?
if self.config.write_intermediate and not fs.is_directory(convolve_path): fs.create_directory(convolve_path)
# Return the path
return convolve_path
# -----------------------------------------------------------------
# @lazyproperty
# def remote_kernels_path(self):
#
# """
    # This function ...
# :return:
# """
#
# dirname = "observedimagemaker_" + self.output_path_hash + "_kernels"
# dirpath = fs.join(self.remote.pts_temp_path, dirname)
#
# # Create
# if self.config.write_kernels and not self.remote.is_directory(dirpath): self.remote.create_directory(dirpath)
#
# # Return the path
# return dirpath
# -----------------------------------------------------------------
@lazyproperty
def kernels_path(self):
"""
This function ...
:return:
"""
return self.output_path_directory("kernels", create=self.config.write_kernels)
# -----------------------------------------------------------------
@property
def has_coordinate_systems(self):
"""
This function ...
:return:
"""
return self.coordinate_systems is not None
# -----------------------------------------------------------------
@property
def has_kernel_paths(self):
"""
This function ...
:return:
"""
return self.kernel_paths is not None
# -----------------------------------------------------------------
@property
def has_psf_fwhms(self):
"""
This function ...
:return:
"""
return self.psf_fwhms is not None
# -----------------------------------------------------------------
@property
def convolution(self):
"""
        This function ...
:return:
"""
return self.has_kernel_paths or self.has_psf_fwhms
# -----------------------------------------------------------------
@property
def rebinning(self):
"""
        This function ...
:return:
"""
return self.rebin_coordinate_systems is not None
# -----------------------------------------------------------------
@property
def do_sky(self):
"""
This function ...
:return:
"""
return self.config.sky
# -----------------------------------------------------------------
@property
def do_stars(self):
"""
This function ...
:return:
"""
return self.config.stars
# -----------------------------------------------------------------
@property
def do_conversion(self):
"""
This function ...
:return:
"""
return self.unit is not None
# -----------------------------------------------------------------
@property
def do_write(self):
"""
This function ...
:return:
"""
return self.output_path is not None
# -----------------------------------------------------------------
@property
def do_clear(self):
"""
This function ...
:return:
"""
return not self.config.keep_intermediate
# -----------------------------------------------------------------
@property
def do_plot(self):
"""
This function ...
:return:
"""
return self.config.plot
# -----------------------------------------------------------------
@property
def has_remote(self):
"""
This function ...
:return:
"""
return self.host_id is not None
# -----------------------------------------------------------------
@lazyproperty
def remote(self):
"""
This function ...
:return:
"""
return Remote(host_id=self.host_id)
# -----------------------------------------------------------------
@lazyproperty
def session(self):
"""
This function ...
:return:
"""
#new_connection = False
new_connection = True
session = self.remote.start_python_session(attached=True, new_connection_for_attached=new_connection)
return session
# -----------------------------------------------------------------
def _run(self, **kwargs):
"""
This function ...
:param kwargs
:return:
"""
# 2. Create the wavelength grid
self.create_wavelength_grid()
# 3. Load the datacubes
self.load_datacubes()
# 4. Set the coordinate systems of the datacubes
if self.has_coordinate_systems: self.set_coordinate_systems()
# 5. Make the observed images
self.make_images()
# 6. Do convolutions
if self.convolution: self.convolve()
# 7. Rebin
if self.rebinning: self.rebin()
# 8. Add sky
if self.do_sky: self.add_sky()
# 9. Add stars
if self.do_stars: self.add_stars()
# 10. Do unit conversions
if self.do_conversion: self.convert_units()
# 11. Write the results
if self.do_write: self.write()
# 12. Clear intermediate results
if self.do_clear: self.clear()
# 13. Plot
if self.do_plot: self.plot()
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:return:
"""
# Call the setup function of the base class
super(ObservedImageMaker, self).setup(**kwargs)
# Get filters for which not to perform spectral convolution
self.no_spectral_convolution_filters = kwargs.pop("no_spectral_convolution_filters", [])
# Output paths for instruments
self.output_paths_instruments = kwargs.pop("output_paths_instruments", None)
# Get filter names for which to create observed images
self.get_filter_names(**kwargs)
# Get coordinate systems of the datacubes
self.get_coordinate_systems(**kwargs)
# Get unit for the images
self.get_unit(**kwargs)
# Get kernels
self.get_kernels(**kwargs)
# Get rebin coordinate systems
self.get_rebin_coordinate_systems(**kwargs)
# Get remote host ID
self.get_host_id(**kwargs)
# Update the remote
if self.has_remote and self.config.deploy_pts: self.deploy_pts()
# -----------------------------------------------------------------
def get_filter_names(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Filter names
if kwargs.get("filter_names", None) is not None:
# Check
if "filters" in kwargs: raise ValueError("Cannot specify 'filters' and 'filter_names' simultaneously")
# Set filter names
self.filter_names = kwargs.pop("filter_names")
# Filters
elif kwargs.get("filters", None) is not None: self.filter_names = [str(fltr) for fltr in kwargs.pop("filters")]
# From config
elif self.config.filters is not None: self.filter_names = [str(fltr) for fltr in self.config.filters]
# -----------------------------------------------------------------
def get_coordinate_systems(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Debugging
log.debug("Getting the coordinate systems ...")
# WCS
if kwargs.get("wcs", None) is not None:
# Check that wcs_instrument is defined
wcs_instrument = kwargs.pop("wcs_instrument")
# Get the wcs
wcs = kwargs.pop("wcs")
# Set the coordinate system
self.coordinate_systems = dict()
self.coordinate_systems[wcs_instrument] = wcs
# WCS paths
elif kwargs.get("wcs_paths", None) is not None:
# Get the paths
wcs_paths = kwargs.pop("wcs_paths")
# Defined for each instrument
if types.is_dictionary(wcs_paths):
# Initialize
self.coordinate_systems = dict()
# Loop over the instruments
for instrument_name in wcs_paths:
# Load wcs
wcs = CoordinateSystem.from_file(wcs_paths[instrument_name])
# Set wcs
self.coordinate_systems[instrument_name] = wcs
# Invalid
            else: raise ValueError("Invalid option for 'wcs_paths'")
# Single WCS path is defined
elif kwargs.get("wcs_path", None) is not None:
# Check that wcs_instrument is defined
wcs_instrument = kwargs.pop("wcs_instrument")
# Get the wcs
wcs_path = kwargs.pop("wcs_path")
wcs = CoordinateSystem.from_file(wcs_path)
# Set the coordinate system
self.coordinate_systems = dict()
self.coordinate_systems[wcs_instrument] = wcs
# -----------------------------------------------------------------
def get_unit(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Debugging
log.debug("Getting the target unit ...")
# Get the unit
self.unit = kwargs.pop("unit", self.config.unit)
# -----------------------------------------------------------------
def get_kernels(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Debugging
log.debug("Getting the kernel paths ...")
# Checks
auto_psfs = kwargs.pop("auto_psfs", self.config.convolve)
if kwargs.get("kernel_paths", None) is not None and kwargs.get("psf_paths", None) is not None: raise ValueError("Cannot specify 'kernel_paths' and 'psf_paths' simultaneously")
if kwargs.get("psf_paths", None) is not None and auto_psfs: raise ValueError("Cannot specify 'psf_paths' when 'auto_psfs' is enabled")
if auto_psfs and kwargs.get("kernel_paths", None) is not None: raise ValueError("Cannot specify 'kernel_paths' when 'auto_psfs' is enabled")
# Get FWHMs reference dataset
if kwargs.get("fwhms_dataset", None) is not None:
# Load the dataset
from ...magic.core.dataset import DataSet
fwhms_dataset = kwargs.pop("fwhms_dataset")
if types.is_string_type(fwhms_dataset): fwhms_dataset = DataSet.from_file(fwhms_dataset)
image_names_for_filters = fwhms_dataset.get_names_for_filters(self.filter_names)
for filter_name, image_name in zip(self.filter_names, image_names_for_filters):
# Check whether there is such an image
if image_name is None:
log.warning("There is no image in the dataset for the '" + filter_name + "' filter: FWHM cannot be obtained")
continue
# Get the FWHM
fwhm = fwhms_dataset.get_fwhm(image_name)
# If defined, set the FWHM
if fwhm is not None:
if self.fwhms is None: self.fwhms = dict()
self.fwhms[filter_name] = fwhm
else: log.warning("The FWHM of the '" + filter_name + "' image in the dataset is not defined")
# Kernel paths
if kwargs.get("kernel_paths", None) is not None: self.kernel_paths = kwargs.pop("kernel_paths")
# PSF paths
elif kwargs.pop("psf_paths", None) is not None: self.kernel_paths = kwargs.pop("psf_paths")
# Automatic PSF determination
elif auto_psfs: self.set_psf_kernels()
# -----------------------------------------------------------------
def has_fwhm(self, filter_name):
"""
        This function ...
:param filter_name:
:return:
"""
return self.fwhms is not None and filter_name in self.fwhms and self.fwhms[filter_name] is not None
# -----------------------------------------------------------------
def set_psf_kernels(self):
"""
This function ...
:return:
"""
# Debugging
log.debug("Determining the PSF kernel automatically for each image filter ...")
# Imports
from ...magic.convolution.aniano import AnianoKernels
from ...magic.convolution.kernels import get_fwhm, has_variable_fwhm, has_average_variable_fwhm, get_average_variable_fwhm
# Get Aniano kernels object
aniano = AnianoKernels()
# Initialize the kernel paths dictionary
self.kernel_paths = dict()
# Loop over the filter names
for filter_name in self.filter_names:
# Check whether we have Aniano PSF
if aniano.has_psf_for_filter(filter_name):
# Get the psf path
psf_path = aniano.get_psf_path(filter_name, fwhm=self.get_fwhm(filter_name))
# Set the PSF kernel path
self.kernel_paths[filter_name] = psf_path
# check whether we have a FWHM
elif self.has_fwhm(filter_name):
# Get the FWHM
fwhm = self.fwhms[filter_name]
# Set the FWHM
if self.psf_fwhms is None: self.psf_fwhms = dict()
self.psf_fwhms[filter_name] = fwhm
# Variable FWHM?
elif not has_variable_fwhm(filter_name):
# Get the FWHM
fwhm = get_fwhm(filter_name)
# Set the FWHM
if self.psf_fwhms is None: self.psf_fwhms = dict()
self.psf_fwhms[filter_name] = fwhm
# The FWHM is variable, but we have a good average value
elif has_average_variable_fwhm(filter_name):
# Get the average FWHM
fwhm = get_average_variable_fwhm(filter_name)
# Give warning
log.warning("The FWHM for the '" + filter_name + "' is variable, but using the average value for this filter (Clark et al., 2017) ...")
# Set the FWHM
if self.psf_fwhms is None: self.psf_fwhms = dict()
self.psf_fwhms[filter_name] = fwhm
# No FWHM can be found
else:
#raise ValueError("")
log.error("The FWHM for the '" + filter_name + "' could not be determined: convolution will not be performed")
# -----------------------------------------------------------------
def get_rebin_coordinate_systems(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Debugging
log.debug("Getting rebin coordinate systems ...")
# Rebin WCS paths
if kwargs.get("rebin_wcs_paths", None) is not None:
# Initialize dictionary
self.rebin_coordinate_systems = dict()
# Get the argument
rebin_wcs_paths = kwargs.pop("rebin_wcs_paths")
# WCS paths are defined per instrument
if types.is_dictionary_of_dictionaries(rebin_wcs_paths):
# Loop over the different instruments
for instrument_name in rebin_wcs_paths:
wcs_dict = dict()
# Loop over the filter names
for filter_name in rebin_wcs_paths[instrument_name]:
# Load the wcs
wcs = CoordinateSystem.from_file(rebin_wcs_paths[instrument_name][filter_name])
wcs_dict[filter_name] = wcs
# Set the coordinate systems for this instrument
self.rebin_coordinate_systems[instrument_name] = wcs_dict
# WCS paths are only defined per filter name
elif types.is_dictionary(rebin_wcs_paths):
# Check that rebin_instrument is specified
rebin_instrument = kwargs.pop("rebin_instrument")
# Initialize
self.rebin_coordinate_systems = dict()
self.rebin_coordinate_systems[rebin_instrument] = dict()
# Load the coordinate systems
for filter_name in self.filter_names:
wcs = CoordinateSystem.from_file(rebin_wcs_paths[filter_name])
self.rebin_coordinate_systems[rebin_instrument][filter_name] = wcs
# Rebin WCS
elif kwargs.get("rebin_wcs", None) is not None:
# Check that rebin_instrument is specified
rebin_instrument = kwargs.pop("rebin_instrument")
# Initialize
self.rebin_coordinate_systems = dict()
self.rebin_coordinate_systems[rebin_instrument] = dict()
# Load the coordinate systems
rebin_wcs = kwargs.pop("rebin_wcs")
for filter_name in self.filter_names:
self.rebin_coordinate_systems[rebin_instrument][filter_name] = rebin_wcs
# Rebin wcs path
elif kwargs.get("rebin_wcs_path", None) is not None:
# Check that rebin_instrument is specified
rebin_instrument = kwargs.pop("rebin_instrument")
            # Initialize
self.rebin_coordinate_systems = dict()
self.rebin_coordinate_systems[rebin_instrument] = dict()
# Load the wcs
rebin_wcs_path = kwargs.pop("rebin_wcs_path")
rebin_wcs = CoordinateSystem.from_file(rebin_wcs_path)
# Set the coordinate systems
for filter_name in self.filter_names:
self.rebin_coordinate_systems[rebin_instrument][filter_name] = rebin_wcs
# Rebin dataset
elif kwargs.get("rebin_dataset", None) is not None:
from ...magic.core.dataset import DataSet
# Get the dataset
dataset = kwargs.pop("rebin_dataset")
if types.is_string_type(dataset): dataset = DataSet.from_file(dataset)
# Check that rebin_instrument is specified
rebin_instrument = kwargs.pop("rebin_instrument")
# Initialize
self.rebin_coordinate_systems = dict()
self.rebin_coordinate_systems[rebin_instrument] = dict()
# Loop over the filter names
image_names_for_filters = dataset.get_names_for_filters(self.filter_names)
for filter_name, image_name in zip(self.filter_names, image_names_for_filters):
# Check whether there is such an image
if image_name is None:
log.warning("There is no image in the dataset for the '" + filter_name + "' filter: skipping for rebinning ...")
continue
# Get the coordinate system
#wcs = dataset.get_coordinate_system_for_filter(filter_name, return_none=True)
wcs = dataset.get_coordinate_system(image_name) # FASTER!
# if wcs is None:
# log.warning("The coordinate system for the '" + filter_name + "' filter is not found in the dataset: skipping ...")
# continue
# Set the coordinate system
self.rebin_coordinate_systems[rebin_instrument][filter_name] = wcs
# -----------------------------------------------------------------
def get_host_id(self, **kwargs):
"""
        This function ...
:param kwargs:
:return:
"""
# Debugging
log.debug("Getting remote host ...")
# Get the host ID
self.host_id = kwargs.pop("host_id", None)
# Remote spectral convolution flag
self.remote_spectral_convolution = kwargs.pop("remote_spectral_convolution", False)
# Get thresholds
self.remote_threshold = kwargs.pop("remote_threshold", None)
self.remote_npixels_threshold = kwargs.pop("remote_npixels_threshold", None)
self.remote_rebin_threshold = kwargs.pop("remote_rebin_threshold", None)
self.remote_convolve_threshold = kwargs.pop("remote_convolve_threshold", None)
# -----------------------------------------------------------------
def deploy_pts(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Deploying PTS remotely ...")
# Create the deployer
deployer = Deployer()
# Don't do anything locally
deployer.config.local = False
# Only deploy PTS
deployer.config.skirt = False
deployer.config.pts = True
# Set the host ids
deployer.config.hosts = [self.remote.host]
# Check versions between local and remote
deployer.config.check = self.config.check_versions
# Update PTS dependencies
deployer.config.update_dependencies = self.config.update_dependencies
# Do clean install
deployer.config.clean = self.config.deploy_clean
# Pubkey pass
deployer.config.pubkey_password = self.config.pubkey_password
# Run the deployer
deployer.run()
# -----------------------------------------------------------------
@lazyproperty
def filters(self):
"""
This function ...
:return:
"""
return {filter_name: parse_filter(filter_name) for filter_name in self.filter_names}
# -----------------------------------------------------------------
@lazyproperty
def min_filter_name(self):
"""
This function ...
:return:
"""
min_name = None
# Loop over the filters
for filter_name in self.filter_names:
if min_name is None or self.filters[filter_name].min < self.filters[min_name].min: min_name = filter_name
if min_name is None: raise RuntimeError("Something went wrong: no filters specified?")
# Return the name of the filter with minimum wavelength
return min_name
# -----------------------------------------------------------------
@lazyproperty
def max_filter_name(self):
"""
This function ...
:return:
"""
max_name = None
# Loop over the filters
for filter_name in self.filter_names:
if max_name is None or self.filters[filter_name].max > self.filters[max_name].max: max_name = filter_name
if max_name is None: raise RuntimeError("Something went wrong: no filters specified?")
# Return the name of the filter with maximum wavelength
return max_name
# -----------------------------------------------------------------
@property
def min_wavelength(self):
"""
This function ...
:return:
"""
return self.filters[self.min_filter_name].min
# -----------------------------------------------------------------
@property
def max_wavelength(self):
"""
This function ...
:return:
"""
return self.filters[self.max_filter_name].max
# -----------------------------------------------------------------
@lazyproperty
def wavelength_range(self):
"""
This function ...
:return:
"""
return QuantityRange(self.min_wavelength, self.max_wavelength)
# -----------------------------------------------------------------
def filter_names_with_image_for_instrument(self, instr_name):
"""
This function ...
:param instr_name:
:return:
"""
return [filter_name for filter_name in self.filter_names if self.has_image(instr_name, filter_name)]
# -----------------------------------------------------------------
def filters_with_image_for_instrument(self, instr_name):
"""
This function ...
:param instr_name:
:return:
"""
return {filter_name: parse_filter(filter_name) for filter_name in self.filter_names_with_image_for_instrument(instr_name)}
# -----------------------------------------------------------------
def filter_names_without_image_for_instrument(self, instr_name):
"""
This function ...
:param instr_name:
:return:
"""
return [filter_name for filter_name in self.filter_names if not self.has_image(instr_name, filter_name)]
# -----------------------------------------------------------------
def filters_without_image_for_instrument(self, instr_name):
"""
This function ...
:param instr_name:
:return:
"""
return {filter_name: parse_filter(filter_name) for filter_name in self.filter_names_without_image_for_instrument(instr_name)}
# -----------------------------------------------------------------
def needs_remote(self, path):
"""
This function ...
:return:
"""
from ...magic.core.fits import get_npixels
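# Remote loading is used only when a remote host is configured, and then only when the
# datacube exceeds one of the configured thresholds (file size or number of pixels), or
# when remote spectral convolution is enabled and at least one filter needs it.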
# No remote is set
if self.host_id is None: return False
# File size is exceeded
if self.remote_threshold is not None and fs.file_size(path) > self.remote_threshold: return True
# Number of pixels is exceeded
if self.remote_npixels_threshold is not None and get_npixels(path) > self.remote_npixels_threshold: return True
# Remote spectral convolution
if self.has_spectral_convolution_filters and self.remote_spectral_convolution: return True
# Not remote
return False
# -----------------------------------------------------------------
def has_distance(self, instr_name):
"""
This function ...
:param instr_name:
:return:
"""
return self.distances is not None and instr_name in self.distances and self.distances[instr_name] is not None
# -----------------------------------------------------------------
def load_datacubes(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the SKIRT output datacubes ...")
# Loop over the different simulated TOTAL datacubes
for path in self.total_datacube_paths:
# Get the name of the instrument
instr_name = get_datacube_instrument_name(path, self.simulation_prefix)
# Make for this instrument?
if not self.make_for_instrument(instr_name): continue
# Check if already present
if self.has_all_images(instr_name):
if self.config.regenerate: self.remove_all_images(instr_name)
else:
log.success("All images for the '" + instr_name + "' have already been created: skipping ...")
continue
# Try loading the datacube
datacube = self.load_datacube(path, instr_name)
if datacube is None: continue
# If the distance is defined, set the distance
if self.has_distance(instr_name): datacube.distance = self.distances[instr_name]
# Convert the datacube from neutral flux density to wavelength flux density
datacube.convert_to_corresponding_wavelength_density_unit()
# Add the datacube to the dictionary
self.datacubes[instr_name] = datacube
# -----------------------------------------------------------------
def load_datacube(self, path, instr_name):
"""
This function ...
:param path:
:param instr_name:
:return:
"""
# Debugging
log.debug("Loading total datacube of '" + instr_name + "' instrument from '" + path + "' ...")
# Load datacube remotely
if self.needs_remote(path): datacube = self.load_datacube_remote(path)
# Load datacube locally
else: datacube = self.load_datacube_local(path)
# Return the datacube
return datacube
# -----------------------------------------------------------------
def load_datacube_local(self, path):
"""
This function ...
:param self:
:param path:
:return:
"""
# Debugging
log.debug("Trying to load the datacube '" + path + "' locally ...")
# Slice the datacube to only the needed wavelength range?
# Load
try: datacube = DataCube.from_file(path, self.wavelength_grid, wavelength_range=self.wavelength_range)
except fits.DamagedFITSFileError as e:
log.error("The datacube '" + path + "' is damaged: images cannot be created. Skipping this datacube ...")
datacube = None
# Return the datacube
return datacube
# -----------------------------------------------------------------
def load_datacube_remote(self, path):
"""
This function ...
:param self:
:param path:
:return:
"""
# Debugging
log.debug("Trying to load the datacube '" + path + "' remotely ...")
# Slice the datacube to only the needed wavelength range?
# Load
try: datacube = RemoteDataCube.from_file(path, self.wavelength_grid, self.session, wavelength_range=self.wavelength_range)
except fits.DamagedFITSFileError as e:
log.error("The datacube '" + path + "' is damaged: images cannot be created. Skipping this datacube ...")
datacube = None
# Return
return datacube
# -----------------------------------------------------------------
def set_coordinate_systems(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Setting the WCS of the simulated images ...")
# Loop over the different datacubes and set the WCS
for instr_name in self.datacubes:
# Check whether coordinate system is defined for this instrument
if instr_name not in self.coordinate_systems: continue
# Debugging
log.debug("Setting the coordinate system of the '" + instr_name + "' instrument ...")
# Set the coordinate system for this datacube
self.datacubes[instr_name].wcs = self.coordinate_systems[instr_name]
# -----------------------------------------------------------------
@lazyproperty
def spectral_convolution_filters(self):
"""
This function ...
:return:
"""
# No spectral convolution for any filter
if not self.config.spectral_convolution: return []
# Initialize list
filters = []
# Loop over the filters
for fltr in self.filters:
if fltr in self.no_spectral_convolution_filters: pass
else: filters.append(fltr)
# Return the list of filters
return filters
# -----------------------------------------------------------------
@lazyproperty
def nspectral_convolution_filters(self):
"""
This function ...
:return:
"""
return len(self.spectral_convolution_filters)
# -----------------------------------------------------------------
@property
def has_spectral_convolution_filters(self):
"""
This function ...
:return:
"""
return self.nspectral_convolution_filters > 0
# -----------------------------------------------------------------
def remote_intermediate_initial_path_for_image(self, instr_name, filter_name):
"""
This function ...
:param instr_name:
:param filter_name:
:return:
"""
return fs.join(self.remote_intermediate_initial_path, instr_name + "__" + filter_name + ".fits")
# -----------------------------------------------------------------
def intermediate_initial_path_for_image(self, instr_name, filter_name):
"""
This function ...
:param instr_name:
:param filter_name:
:return:
"""
return fs.join(self.intermediate_initial_path, instr_name + "__" + filter_name + ".fits")
# -----------------------------------------------------------------
def make_images(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Making the observed images (this may take a while) ...")
# Loop over the datacubes
for instr_name in self.datacubes:
# Debugging
log.debug("Making the observed images for the " + instr_name + " instrument ...")
# Get the filters that don't have an image (end result) yet saved on disk
filters_dict = self.filters_without_image_for_instrument(instr_name)
# Initialize a dictionary, indexed by the filter names, to contain the images
images = dict()
# Get the datacube
datacube = self.datacubes[instr_name]
# Check for which filters an initial image is already present
make_filter_names, make_filters = self._find_initial_images(images, datacube, filters_dict, instr_name)
# Determine the number of processes
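# Multiple processes are only useful when at least one filter requires spectral
# convolution (integrating the datacube over many wavelength slices); otherwise a single
# process is enough. The process count is further capped by the number of filters to make.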
if not self.has_spectral_convolution_filters: nprocesses = 1
else:
if isinstance(datacube, RemoteDataCube): nprocesses = self.config.nprocesses_remote
elif isinstance(datacube, DataCube): nprocesses = self.config.nprocesses_local
else: raise ValueError("Invalid datacube object for '" + instr_name + "' instrument")
# Limit the number of processes to the number of filters
nprocesses = min(len(make_filters), nprocesses)
# Create the observed images from the current datacube (the frames get the correct unit, wcs, filter)
frames = self.datacubes[instr_name].frames_for_filters(make_filters, convolve=self.spectral_convolution_filters,
nprocesses=nprocesses, check_previous_sessions=True,
check=self.config.check_wavelengths,
min_npoints = self.config.min_npoints,
min_npoints_fwhm = self.config.min_npoints_fwhm,
ignore_bad = self.config.ignore_bad,
skip_ignored_bad_convolution = self.config.skip_ignored_bad_convolution,
skip_ignored_bad_closest = self.config.skip_ignored_bad_closest)
# Add the observed images to the dictionary
for filter_name, frame in zip(make_filter_names, frames): images[filter_name] = frame # these frames can be RemoteFrames if the datacube was a RemoteDataCube
# Add the observed image dictionary for this datacube to the total dictionary (with the datacube name as a key)
self.images[instr_name] = images
# Save intermediate results
if self.config.write_intermediate: self._write_initial_images(images, instr_name, make_filter_names)
# -----------------------------------------------------------------
def _write_initial_images(self, images, instr_name, make_filter_names):
"""
This function ...
:param images:
:param instr_name:
:param make_filter_names:
:return:
"""
from ...magic.core.remote import RemoteFrame
from ...magic.core.frame import Frame
# Loop over the images
for filter_name in images:
# If the image didn't need to be made, it means it was already saved
if filter_name not in make_filter_names: continue
# Remote frame?
frame = images[filter_name]
if isinstance(frame, RemoteFrame):
# Determine the path
path = self.remote_intermediate_initial_path_for_image(instr_name, filter_name)
# Save the frame remotely
frame.saveto_remote(path)
# Regular frame?
elif isinstance(frame, Frame):
# Determine the path
path = self.intermediate_initial_path_for_image(instr_name, filter_name)
# Save the frame
frame.saveto(path)
# Invalid
else: raise ValueError("Something went wrong")
# -----------------------------------------------------------------
def _find_initial_images(self, images, datacube, filters_dict, instr_name):
"""
This function ...
:param images:
:param datacube:
:param filters_dict:
:param instr_name:
:return:
"""
from ...magic.core.remote import RemoteFrame
from ...magic.core.frame import Frame
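# This helper implements a simple caching scheme: for each filter, if an 'initial'
# intermediate image was already written (remotely for a RemoteDataCube, locally for a
# regular DataCube), it is loaded back into the 'images' dictionary instead of being
# regenerated; only the remaining filters are returned for frame creation.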
# Get filters list and filter names list
filter_names = filters_dict.keys()
filters = filters_dict.values()
# Initialize
make_filter_names = []
make_filters = []
for filter_name, fltr in zip(filter_names, filters):
# Remote datacube
if isinstance(datacube, RemoteDataCube):
path = self.remote_intermediate_initial_path_for_image(instr_name, filter_name)
if self.remote.is_file(path):
# Success
log.success("Initial '" + filter_name + "' image from the '" + instr_name + "' instrument is found in the remote directory '" + self.remote_intermediate_initial_path + "': not making it again")
# Load as remote frame
frame = RemoteFrame.from_remote_file(path, self.session)
# Add to the dictionary of initial images
images[filter_name] = frame
else:
make_filter_names.append(filter_name)
make_filters.append(fltr)
# Regular datacube
elif isinstance(datacube, DataCube):
path = self.intermediate_initial_path_for_image(instr_name, filter_name)
if fs.is_file(path):
# Success
log.success("Initial '" + filter_name + "' image from the '" + instr_name + "' instrument is found in the directory '" + self.intermediate_initial_path + "': not making it again")
# Load as frame
frame = Frame.from_file(path)
# Add to the dictionary of initial images
images[filter_name] = frame
else:
make_filter_names.append(filter_name)
make_filters.append(fltr)
# Invalid
else: raise ValueError("Something went wrong")
# Return
return make_filter_names, make_filters
# -----------------------------------------------------------------
def remote_intermediate_convolve_path_for_image(self, instr_name, filter_name):
"""
This function ...
:param instr_name:
:param filter_name:
:return:
"""
return fs.join(self.remote_intermediate_convolve_path, instr_name + "__" + filter_name + ".fits")
# -----------------------------------------------------------------
def intermediate_convolve_path_for_image(self, instr_name, filter_name):
"""
This function ...
:param instr_name:
:param filter_name:
:return:
"""
return fs.join(self.intermediate_convolve_path, instr_name + "__" + filter_name + ".fits")
# -----------------------------------------------------------------
def has_kernel_path(self, filter_name):
"""
This function ...
:param filter_name:
:return:
"""
if not self.has_kernel_paths: return False
# Check if the name of the image filter is a key in the 'kernel_paths' dictionary. If not, don't convolve.
return filter_name in self.kernel_paths and self.kernel_paths[filter_name] is not None
# -----------------------------------------------------------------
def has_psf_fwhm(self, filter_name):
"""
This function ...
:param filter_name:
:return:
"""
if not self.has_psf_fwhms: return False
# Check
return filter_name in self.psf_fwhms and self.psf_fwhms[filter_name] is not None
# -----------------------------------------------------------------
def get_fwhm_for_filter(self, filter_name):
"""
This function ...
:param filter_name:
:return:
"""
# Has FWHM defined
if self.has_fwhm(filter_name): return self.fwhms[filter_name]
# Has kernel
if self.has_kernel_path(filter_name): fwhm = get_kernel_fwhm(self.kernel_paths[filter_name])
# Has PSF FWHM
elif self.has_psf_fwhm(filter_name): fwhm = self.psf_fwhms[filter_name]
# Error
else: raise RuntimeError("Something went wrong")
# Return the FWHM
return fwhm
# -----------------------------------------------------------------
def kernel_path_for_image(self, instr_name, filter_name):
"""
This function ...
:param instr_name:
:param filter_name:
:return:
"""
return fs.join(self.kernels_path, instr_name + "__" + filter_name + ".fits")
# -----------------------------------------------------------------
def get_fwhm(self, filter_name):
"""
This function ...
:param filter_name:
:return:
"""
if not self.has_fwhm(filter_name): return None
else: return self.fwhms[filter_name]
# -----------------------------------------------------------------
def get_kernel_for_filter(self, filter_name, pixelscale):
"""
This function ...
:param filter_name:
:param pixelscale:
:return:
"""
# Debugging
log.debug("Loading the convolution kernel for the '" + filter_name + "' filter ...")
# Get the kernel
if self.has_kernel_path(filter_name): kernel = ConvolutionKernel.from_file(self.kernel_paths[filter_name], fwhm=self.get_fwhm(filter_name))
# Get the PSF kernel
elif self.has_psf_fwhm(filter_name): kernel = ConvolutionKernel.gaussian(self.psf_fwhms[filter_name], pixelscale)
# Error
else: raise RuntimeError("Something went wrong")
# SET FWHM IF UNDEFINED
if kernel.fwhm is None:
if self.has_fwhm(filter_name): kernel.fwhm = self.fwhms[filter_name]
else: log.warning("The FWHM of the convolution kernel for the '" + filter_name + "' image is undefined")
# Return the kernel
return kernel
# -----------------------------------------------------------------
def get_filter_names_for_convolution(self, instr_name):
"""
This function ...
:param instr_name:
:return:
"""
from ...magic.core.remote import RemoteFrame
from ...magic.core.frame import Frame
# Debugging
log.debug("Checking for which filters convolution has to be performed on the frame ...")
# Initialize list for the filter names
filter_names = []
# Loop over the filters
for filter_name in self.images[instr_name]:
# Check if the name of the image filter is a key in the 'kernel_paths' dictionary. If not, don't convolve.
if not self.has_kernel_path(filter_name) and not self.has_psf_fwhm(filter_name):
# Debugging
log.debug("The filter '" + filter_name + "' is not in the kernel paths nor is PSF FWHM defined: no convolution")
continue
# Check whether the end result is already there
if self.has_image(instr_name, filter_name):
log.success("The result for the '" + filter_name + "' image from the '" + instr_name + "' instrument is already present: skipping convolution ...")
continue
# Get the frame
frame = self.images[instr_name][filter_name]
# Check whether intermediate result is there
# Remote frame?
if isinstance(frame, RemoteFrame):
# Get path
path = self.remote_intermediate_convolve_path_for_image(instr_name, filter_name)
# Check
if self.remote.is_file(path):
# Success
log.success("Convolved '" + filter_name + "' image from the '" + instr_name + "' instrument is found in remote directory '" + self.remote_intermediate_convolve_path + "': not making it again")
# Load as remote frame
frame = RemoteFrame.from_remote_file(path, self.session)
# Replace the frame by the convolved frame
self.images[instr_name][filter_name] = frame
# Skip
continue
else: pass # go on
# Regular frame?
elif isinstance(frame, Frame):
# Get path
path = self.intermediate_convolve_path_for_image(instr_name, filter_name)
# Check
if fs.is_file(path):
# Success
log.success("Convolved '" + filter_name + "' image from the '" + instr_name + "' instrument is found in directory '" + self.intermediate_convolve_path + "': not making it again")
# Load as frame
frame = Frame.from_file(path)
# Replace the frame by the convolved frame
self.images[instr_name][filter_name] = frame
# Skip
continue
else: pass # go on
# Invalid
else: raise RuntimeError("Something went wrong")
# Add the filter name
filter_names.append(filter_name)
# Return the filter names
return filter_names
# -----------------------------------------------------------------
def check_fwhm_pixelscale(self, instr_name, filter_name):
"""
This function ...
:param instr_name:
:param filter_name:
:return:
"""
# Debugging
log.debug("Checking the ratio between the FWHM and the pixelscale ...")
# Check whether the pixelscale is defined
pixelscale = self.images[instr_name][filter_name].pixelscale
if pixelscale is None: raise ValueError("Pixelscale of the '" + filter_name + "' image of the '" + instr_name + "' datacube is not defined, convolution not possible")
# Check whether FWHM is defined
target_fwhm = self.get_fwhm_for_filter(filter_name)
if target_fwhm is None: raise ValueError("The FWHM cannot be determined for the '" + filter_name + "' image")
# Compare FWHM and pixelscale
if target_fwhm > self.config.max_fwhm_pixelscale_ratio * pixelscale.average:
# GIVE WARNING
log.warning("The target FWHM (" + tostr(target_fwhm) + ") is greater than " + tostr(self.config.max_fwhm_pixelscale_ratio) + " times the pixelscale of the image (" + tostr(pixelscale.average) + ")")
log.warning("Downsampling the image to a more reasonable pixelscale prior to convolution ...")
# Get the original FWHM to pixelscale ratio
original_fwhm_pixelscale_ratio = (target_fwhm / pixelscale.average).to("").value
# When rebinning has to be performed, check the target pixelscale
if self.needs_rebinning(instr_name, filter_name):
# Get the target coordinate system
target_wcs = self.rebin_coordinate_systems[instr_name][filter_name]
# Get the target pixelscale
target_pixelscale = target_wcs.average_pixelscale
target_downsample_factor = (target_pixelscale / pixelscale.average).to("").value
# Get the target FWHM to pixelscale ratio
target_fwhm_pixelscale_ratio = (target_fwhm / target_pixelscale).to("").value
# Get the geometric mean between original and target ratios
ratio = numbers.geometric_mean(original_fwhm_pixelscale_ratio, target_fwhm_pixelscale_ratio)
# Translate this ratio into a pixelscale
new_pixelscale = target_fwhm / ratio
# Determine the downsample factor
downsample_factor = (new_pixelscale / pixelscale.average).to("").value
downsample_factor = numbers.nearest_even_integer_below(downsample_factor, below=target_downsample_factor)
# No rebinning: we can freely choose the downsampling factor
else:
# Define the ideal FWHM to pixelscale ratio
ideal_fwhm_pixelscale_ratio = 25
# Translate this ratio into a pixelscale
ideal_pixelscale = target_fwhm / ideal_fwhm_pixelscale_ratio
# Determine the downsample factor
downsample_factor = (ideal_pixelscale / pixelscale.average).to("").value
downsample_factor = numbers.nearest_even_integer(downsample_factor)
# Debugging
log.debug("The downsampling factor is " + tostr(downsample_factor))
# DOWNSAMPLE
self.images[instr_name][filter_name].downsample(downsample_factor)
# Re-determine the pixelscale
pixelscale = self.images[instr_name][filter_name].pixelscale
# Return the pixelscale
return pixelscale
# -----------------------------------------------------------------
def convolve(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Convolving the images ...")
# Loop over the images
for instr_name in self.images:
# Debugging
log.debug("Convolving images from the '" + instr_name + "' instrument ...")
# Get the filter names
filter_names = self.get_filter_names_for_convolution(instr_name)
# Loop over the filters
for filter_name in filter_names:
# Check the ratio between the FWHM and the pixelscale
pixelscale = self.check_fwhm_pixelscale(instr_name, filter_name)
# Get kernel
kernel = self.get_kernel_for_image(instr_name, filter_name, pixelscale)
# Debugging
log.debug("Convolving the '" + filter_name + "' image of the '" + instr_name + "' instrument ...")
# Convert to remote frame if necessary
self.check_remote_convolution(instr_name, filter_name)
# Convolve the frame
self.images[instr_name][filter_name].convolve(kernel)
# If intermediate results have to be written
if self.config.write_intermediate: self.write_intermediate_convolved(instr_name, filter_name)
# -----------------------------------------------------------------
def get_kernel_for_image(self, instr_name, filter_name, pixelscale):
"""
This function ...
:param instr_name:
:param filter_name:
:param pixelscale:
:return:
"""
# Debugging
log.debug("Getting convolution kernel for '" + filter_name + "' image of the '" + instr_name + "' instrument ...")
# Determine the path to save the kernel
saved_kernel_path = self.kernel_path_for_image(instr_name, filter_name)
# Exists? -> load the kernel from file
if fs.is_file(saved_kernel_path):
# Success
log.success("Kernel file for the '" + filter_name + "' of the '" + instr_name + "' instrument is found in directory '" + self.kernels_path + "'")
# Load the kernel
kernel = ConvolutionKernel.from_file(saved_kernel_path)
# Create or get the kernel
else:
# Get the kernel
kernel = self.get_kernel_for_filter(filter_name, pixelscale)
# Write the kernel
if self.config.write_kernels: kernel.saveto(saved_kernel_path)
# Return the kernel
return kernel
# -----------------------------------------------------------------
def check_remote_convolution(self, instr_name, filter_name):
"""
This function ...
:param instr_name:
:param filter_name:
:return:
"""
from ...magic.core.remote import RemoteFrame
from ...magic.core.frame import Frame
# Debugging
log.debug("Checking '" + filter_name + "' frame for '" + instr_name + "' instrument for remote convolution ...")
# Get the frame
frame = self.images[instr_name][filter_name]
# Convert into remote frame if necessary
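# Frames whose data size exceeds the configured threshold are pushed to the remote
# python session as RemoteFrames, so that the convolution itself runs remotely.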
if self.remote_convolve_threshold is not None and isinstance(frame, Frame) and frame.data_size > self.remote_convolve_threshold:
self.images[instr_name][filter_name] = RemoteFrame.from_local(frame, self.session)
# -----------------------------------------------------------------
def write_intermediate_convolved(self, instr_name, filter_name):
"""
This function ...
:param instr_name:
:param filter_name:
:return:
"""
from ...magic.core.remote import RemoteFrame
from ...magic.core.frame import Frame
# Debugging
log.debug("Writing convolved '" + filter_name + "' image for '" + instr_name + "' instrument ...")
# Get the frame
frame = self.images[instr_name][filter_name]
# Remote frame?
if isinstance(frame, RemoteFrame):
# Determine the path
path = self.remote_intermediate_convolve_path_for_image(instr_name, filter_name)
# Save the frame remotely
frame.saveto_remote(path)
# Regular frame?
elif isinstance(frame, Frame):
# Determine the path
path = self.intermediate_convolve_path_for_image(instr_name, filter_name)
# Save the frame locally
frame.saveto(path)
# Invalid
else: raise ValueError("Something went wrong")
# -----------------------------------------------------------------
def remote_intermediate_rebin_path_for_image(self, instr_name, filter_name):
"""
This function ...
:param instr_name:
:param filter_name:
:return:
"""
return fs.join(self.remote_intermediate_rebin_path, instr_name + "__" + filter_name + ".fits")
# -----------------------------------------------------------------
def intermediate_rebin_path_for_image(self, instr_name, filter_name):
"""
This function ...
:param instr_name:
:param filter_name:
:return:
"""
return fs.join(self.intermediate_rebin_path, instr_name + "__" + filter_name + ".fits")
# -----------------------------------------------------------------
def needs_rebinning(self, instr_name, filter_name):
"""
This function ...
:param instr_name:
:param filter_name:
:return:
"""
# Check if the name of the datacube appears in the rebin_wcs dictionary
if instr_name not in self.rebin_coordinate_systems: return False
# Check if the name of the image appears in the rebin_wcs[datacube_name] sub-dictionary
if filter_name not in self.rebin_coordinate_systems[instr_name]: return False
# Target coordinate system for rebinning is defined
return True
# -----------------------------------------------------------------
def get_filter_names_for_rebinning(self, instr_name):
"""
This function ...
:param instr_name:
:return:
"""
from ...magic.core.remote import RemoteFrame
from ...magic.core.frame import Frame
# Debugging
log.debug("Checking for which filters rebinning has to be performed on the frame ...")
# Initialize list for the filter names
filter_names = []
# Loop over the filters
for filter_name in self.images[instr_name]:
# Check if the name of the image appears in the rebin_wcs[datacube_name] sub-dictionary
if filter_name not in self.rebin_coordinate_systems[instr_name]:
# Debugging
log.debug("The filter '" + filter_name + "' is not in the rebin coordinate systems for this instrument: no rebinning")
continue
# Check whether the end result is already there
if self.has_image(instr_name, filter_name):
log.success("The result for the '" + filter_name + "' image from the '" + instr_name + "' instrument is already present: skipping rebinning ...")
continue
# Get the frame
frame = self.images[instr_name][filter_name]
# Check whether intermediate result is there
# Remote frame?
if isinstance(frame, RemoteFrame):
# Get path
path = self.remote_intermediate_rebin_path_for_image(instr_name, filter_name)
# Check
if self.remote.is_file(path):
# Success
log.success("Rebinned '" + filter_name + "' image from the '" + instr_name + "' instrument is found in remote directory '" + self.remote_intermediate_rebin_path + "': not making it again")
# Load as remote frame
frame = RemoteFrame.from_remote_file(path, self.session)
# Replace the frame by the rebinned frame
self.images[instr_name][filter_name] = frame
# Skip
continue
else: pass # go on
# Regular frame
elif isinstance(frame, Frame):
# Get path
path = self.intermediate_rebin_path_for_image(instr_name, filter_name)
# Check
if fs.is_file(path):
# Success
log.success("Rebinned '" + filter_name + "' image from the '" + instr_name + "' instrument is found in directory '" + self.intermediate_rebin_path + "': not making it again")
# Load as frame
frame = Frame.from_file(path)
# Replace the frame by the rebinned frame
self.images[instr_name][filter_name] = frame
# Skip
continue
else: pass # go on
# Invalid
else: raise RuntimeError("Something went wrong")
# Add the filter name
filter_names.append(filter_name)
# Return the filter names
return filter_names
# -----------------------------------------------------------------
def get_units(self, instr_name, filter_names=None):
"""
This function ...
:param instr_name:
:param filter_names:
:return:
"""
# Set filter names
if filter_names is None: filter_names = self.images[instr_name].keys()
# Return the units
return [self.images[instr_name][filter_name].unit for filter_name in filter_names]
# -----------------------------------------------------------------
def get_pixelscales(self, instr_name, filter_names=None):
"""
This function ...
:param instr_name:
:param filter_names:
:return:
"""
# Set filter names
if filter_names is None: filter_names = self.images[instr_name].keys()
# Return the pixelscales
return [self.images[instr_name][filter_name].pixelscale for filter_name in filter_names]
# -----------------------------------------------------------------
def get_average_pixelscales(self, instr_name, filter_names=None):
"""
This function ...
:param instr_name:
:param filter_names:
:return:
"""
# Set filter names
if filter_names is None: filter_names = self.images[instr_name].keys()
# Return the average pixelscales
return [self.images[instr_name][filter_name].average_pixelscale for filter_name in filter_names]
# -----------------------------------------------------------------
def rebin(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Rebinning the images to the requested coordinate systems ...")
# Loop over the datacubes
for instr_name in self.images:
# Check if the name of the datacube appears in the rebin_wcs dictionary
if instr_name not in self.rebin_coordinate_systems:
# Debugging
log.debug("The instrument '" + instr_name + "' is not in the rebin coordinate systems: no rebinning")
continue
# Get the frames for rebinning
filter_names = self.get_filter_names_for_rebinning(instr_name)
# Debugging
log.debug("Determining new unit and conversion factor prior to rebinning ...")
# Get the unit of the frames
units = self.get_units(instr_name, filter_names=filter_names)
frame_unit = sequences.get_all_equal_value(units)
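# Rebinning is done on surface-brightness-like data: frames whose unit is not already
# per angular or intrinsic area are converted to such a unit first, rebinned, and then
# converted back to their original unit (see the back_conversion variables below). When
# all frames share the same pixelscale, one conversion factor is computed here and
# reused for every frame; otherwise each frame is converted individually.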
# Obtain the conversion factor to intrinsic or angular area (intensity or surface brightness)
rebinning_unit = frame_unit.corresponding_angular_or_intrinsic_area_unit
distance = self.distances[instr_name] if self.has_distance(instr_name) else None
# Check whether pixelscale is the same between frames
# pixelscale = self.datacubes[instr_name].pixelscale # can be different for each frame from during convolution step (downsampling)
#pixelscales = self.get_pixelscales(instr_name, filter_names=filter_names)
pixelscales = self.get_average_pixelscales(instr_name, filter_names=filter_names)
if sequences.all_equal(pixelscales): rebinning_factor = frame_unit.corresponding_angular_or_intrinsic_area_unit_conversion_factor(distance=distance, pixelscale=pixelscales[0])
else: rebinning_factor = None
# Debugging
log.debug("Rebinning images from the '" + instr_name + "' instrument ...")
# Loop over the frames for rebinning
for filter_name in filter_names:
# Get target WCS
wcs = self.rebin_coordinate_systems[instr_name][filter_name]
# CHECK THE PIXELSCALES
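# Unless upsampling is explicitly allowed, skip rebinning when the target grid has a
# smaller pixelscale than the image itself: that would only interpolate the data onto a
# finer grid without adding information.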
if not self.config.upsample and wcs.average_pixelscale < self.images[instr_name][filter_name].average_pixelscale:
# Give warning that rebinning will not be performed
log.warning("Rebinning will not be peformed for the '" + filter_name + "' image of the '" + instr_name + "' instrument since the target pixelscale is smaller than the current pixelscale")
# Skip the rebin step for this image
continue
# Debugging
log.debug("Rebinning the '" + filter_name + "' image of the '" + instr_name + "' instrument ...")
# Check whether rebinning is required
original_unit = self.images[instr_name][filter_name].unit
needs_conversion = not original_unit.is_per_angular_or_intrinsic_area
# Set variables
back_conversion_unit = None
back_conversion_factor = None
# Convert each frame with the same factor (all the same pixelscale)
if rebinning_factor is not None:
# Debugging
log.debug("Converting the '" + filter_name + "' frame of the '" + instr_name + "' instrument to '" + tostr(rebinning_unit, add_physical_type=True) + "' with a factor of '" + tostr(rebinning_factor) + "' ...")
# Convert
self.images[instr_name][filter_name].convert_by_factor(rebinning_factor, rebinning_unit)
# For back-conversion
back_conversion_unit = frame_unit
back_conversion_factor = 1./rebinning_factor
# Needs conversion
elif needs_conversion:
# Debugging
log.debug("Converting the unit from " + tostr(original_unit, add_physical_type=True) + " to " + tostr(rebinning_unit, add_physical_type=True) + " in order to be able to perform rebinning ...")
# Convert
factor = self.images[instr_name][filter_name].convert_to(rebinning_unit)
# For back-conversion
back_conversion_unit = original_unit
back_conversion_factor = 1./factor
# Not required to convert
else: log.debug("Unit conversion prior to rebinning is not required")
# Convert to remote frame if necessary
self.check_remote_rebinning(instr_name, filter_name)
# Rebin
self.images[instr_name][filter_name].rebin(wcs)
# Convert back to the original frame unit
if back_conversion_unit is not None: self.images[instr_name][filter_name].convert_by_factor(back_conversion_factor, back_conversion_unit)
# If intermediate results have to be written
if self.config.write_intermediate: self.write_intermediate_rebinned(instr_name, filter_name)
# -----------------------------------------------------------------
def check_remote_rebinning(self, instr_name, filter_name):
"""
This function ...
:param instr_name:
:param filter_name:
:return:
"""
from ...magic.core.remote import RemoteFrame
from ...magic.core.frame import Frame
# Debugging
log.debug("Checking '" + filter_name + "' frame for '" + instr_name + "' instrument for remote rebinning ...")
# Get the frame
frame = self.images[instr_name][filter_name]
# Check criteria
if self.remote_rebin_threshold is not None and isinstance(frame, Frame) and frame.data_size > self.remote_rebin_threshold:
self.images[instr_name][filter_name] = RemoteFrame.from_local(frame, self.session)
# -----------------------------------------------------------------
def write_intermediate_rebinned(self, instr_name, filter_name):
"""
This function ...
:param instr_name:
:param filter_name:
:return:
"""
from ...magic.core.remote import RemoteFrame
from ...magic.core.frame import Frame
# Debugging
log.debug("Writing rebinned '" + filter_name + "' frame for '" + instr_name + "' instrument ...")
# Get the frame
frame = self.images[instr_name][filter_name]
# Remote frame?
if isinstance(frame, RemoteFrame):
# Determine the path
path = self.remote_intermediate_rebin_path_for_image(instr_name, filter_name)
# Save the frame remotely
frame.saveto_remote(path)
# Regular frame?
elif isinstance(frame, Frame):
# Determine the path
path = self.intermediate_rebin_path_for_image(instr_name, filter_name)
# Save the frame
frame.saveto(path)
# Invalid
else: raise ValueError("Something went wrong")
# -----------------------------------------------------------------
def add_sky(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Adding artificial sky contribution to the images ...")
# -----------------------------------------------------------------
def add_stars(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Adding artificial stars to the images ...")
# -----------------------------------------------------------------
def convert_units(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Converting the units of the images to " + str(self.unit) + " ...")
# Loop over the instruments
for instr_name in self.images:
# Loop over the images for this instrument
for filter_name in self.images[instr_name]:
# Debugging
log.debug("Converting the unit of the " + filter_name + " image of the '" + instr_name + "' instrument ...")
# Convert
factor = self.images[instr_name][filter_name].convert_to(self.unit)
# Debugging
log.debug("The conversion factor is " + str(factor))
# -----------------------------------------------------------------
def write(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the images ...")
# Loop over the different images (self.images is a nested dictionary of dictionaries)
for instr_name in self.images.keys(): # explicit keys to avoid error that dict changed
# Debugging
log.debug("Writing the images of the '" + instr_name + "' instrument ...")
# Loop over the images for this instrument
for filter_name in list(self.images[instr_name].keys()): # copy the keys: entries are deleted from the dict below
# Debugging
log.debug("Writing the '" + filter_name + "' image ...")
# Determine the path to the output FITS file
path = self.get_image_path(instr_name, filter_name)
# Save the image
self.images[instr_name][filter_name].saveto(path)
# Remove from memory?
del self.images[instr_name][filter_name]
# Set the path
self.paths[instr_name][filter_name] = path
# Cleanup?
gc.collect()
# -----------------------------------------------------------------
def has_all_images(self, instr_name):
"""
This function ...
:param instr_name:
:return:
"""
# Loop over all filter names
for filter_name in self.filter_names:
if not self.has_image(instr_name, filter_name): return False
# All checks passed
return True
# -----------------------------------------------------------------
def has_image(self, instr_name, filter_name):
"""
This function ...
:param instr_name:
:param filter_name:
:return:
"""
path = self.get_image_path(instr_name, filter_name)
return fs.is_file(path) and fits.is_valid(path)
# -----------------------------------------------------------------
def remove_all_images(self, instr_name):
"""
This function ...
:param instr_name:
:return:
"""
# Loop over the filters
for filter_name in self.filter_names:
# Get path
path = self.get_image_path(instr_name, filter_name)
# Remove if existing
fs.remove_file_if_present(path)
# -----------------------------------------------------------------
def remove_image(self, instr_name, filter_name):
"""
This function ...
:param instr_name:
:param filter_name:
:return:
"""
# Determine the path
path = self.get_image_path(instr_name, filter_name)
# Remove if existing
fs.remove_file_if_present(path)
# -----------------------------------------------------------------
def get_instrument_plot_path(self, instr_name):
"""
This function ...
:param instr_name:
:return:
"""
# Group per instrument
if self.config.group:
# Get instrument directory path
instrument_path = self.output_path_directory(instr_name, create=True)
# Return the filepath
return fs.join(instrument_path, "images.pdf")
# Don't group
else: return self.output_path_file(instr_name + ".pdf")
# -----------------------------------------------------------------
def get_image_path(self, instr_name, filter_name):
"""
This function ...
:param instr_name:
:param filter_name:
:return:
"""
# Group per instrument
if self.config.group:
# Determine path for instrument directory (and create)
if self.output_paths_instruments is not None and instr_name in self.output_paths_instruments: instrument_path = self.output_paths_instruments[instr_name]
else: instrument_path = self.output_path_directory(instr_name, create=True)
# Return the filepath
return fs.join(instrument_path, filter_name + ".fits")
# Don't group
else: return self.output_path_file(instr_name + "__" + filter_name + ".fits")
# -----------------------------------------------------------------
def plot(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting ...")
# Images
if self.config.plot_images: self.plot_images()
# -----------------------------------------------------------------
def plot_images(self):
"""
This function ...
:return:
"""
from ...magic.plot.imagegrid import StandardImageGridPlotter
# Inform the user
log.info("Plotting the images ...")
# Loop over the different images
for instr_name in self.images.keys():
# Debugging
log.debug("Plotting the images for the '" + instr_name + "' instrument ...")
# Determine plot path
plot_path = self.get_instrument_plot_path(instr_name)
# Create plotter
plotter = StandardImageGridPlotter()
# Set output directory
plotter.config.output = plot_path
# Extra
plotter.config.normalize = True
# plotter.config.colormap =
# Write data
plotter.config.write = False
# Rebin and crop
# plotter.rebin_to =
# plotter.crop_to =
# Loop over the images for this instrument
for filter_name in self.images[instr_name].keys():
# Add the frame
frame = self.images[instr_name][filter_name]
plotter.add_frame(frame)
# Run the plotter
plotter.run()
# -----------------------------------------------------------------
def clear(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Clearing intermediate results ...")
# TODO
# -----------------------------------------------------------------
|
SKIRT/PTS
|
core/misc/images.py
|
Python
|
agpl-3.0
| 83,531
|
[
"Gaussian"
] |
13dd408c714e0dc8f7b75accd81d1da1c5841871162fd4f6978f7557a01a3ea8
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import os
from unittest.case import skip
from commoncode.testcase import FileBasedTesting
from licensedcode import index
from licensedcode.match import LicenseMatch
from licensedcode.match import get_texts
from licensedcode import models
from licensedcode.models import Rule
from licensedcode.spans import Span
from licensedcode import match_aho
from licensedcode import match_seq
from license_test_utils import print_matched_texts
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
"""
Test the core license detection mechanics.
"""
class TestIndexMatch(FileBasedTesting):
test_data_dir = TEST_DATA_DIR
def test_match_does_not_return_matches_for_empty_query(self):
idx = index.LicenseIndex([Rule(_text='A one. A two. license A three.')])
matches = idx.match(query_string='')
assert [] == matches
matches = idx.match(query_string=None)
assert [] == matches
def test_match_does_not_return_matches_for_junk_queries(self):
idx = index.LicenseIndex([Rule(_text='A one. a license two. license A three.')])
assert [] == idx.match(query_string=u'some other junk')
assert [] == idx.match(query_string=u'some junk')
def test_match_return_one_match_with_correct_offsets(self):
idx = index.LicenseIndex([Rule(_text='A one. a license two. A three.', licenses=['abc'])])
querys = u'some junk. A one. A license two. A three.'
# 0 1 2 3 4 5 6 7 8
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
qtext, itext = get_texts(match, query_string=querys, idx=idx)
assert 'A one A license two A three' == qtext
assert 'A one a license two A three' == itext
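# qspan/ispan: the matched token positions in the query text and in the indexed rule
# text, respectively (7 tokens here).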
assert Span(0, 6) == match.qspan
assert Span(0, 6) == match.ispan
def test_match_can_match_exactly_rule_text_used_as_query(self):
test_file = self.get_test_loc('detect/mit/mit.c')
rule = Rule(text_file=test_file, licenses=['mit'])
idx = index.LicenseIndex([rule])
matches = idx.match(test_file)
assert 1 == len(matches)
match = matches[0]
assert rule == match.rule
assert Span(0, 86) == match.qspan
assert Span(0, 86) == match.ispan
assert 100 == match.coverage()
assert 100 == match.score()
def test_match_matches_correctly_simple_exact_query_1(self):
tf1 = self.get_test_loc('detect/mit/mit.c')
ftr = Rule(text_file=tf1, licenses=['mit'])
idx = index.LicenseIndex([ftr])
query_doc = self.get_test_loc('detect/mit/mit2.c')
matches = idx.match(query_doc)
assert 1 == len(matches)
match = matches[0]
assert ftr == match.rule
assert Span(0, 86) == match.qspan
assert Span(0, 86) == match.ispan
def test_match_matches_correctly_simple_exact_query_across_query_runs(self):
tf1 = self.get_test_loc('detect/mit/mit.c')
ftr = Rule(text_file=tf1, licenses=['mit'])
idx = index.LicenseIndex([ftr])
query_doc = self.get_test_loc('detect/mit/mit3.c')
matches = idx.match(query_doc)
assert 1 == len(matches)
match = matches[0]
qtext, itext = get_texts(match, location=query_doc, idx=idx)
expected_qtext = u'''
Permission is hereby granted free of charge to any person obtaining a
copy of this software and associated documentation files the Software to
deal in THE SOFTWARE WITHOUT RESTRICTION INCLUDING WITHOUT LIMITATION THE
RIGHTS TO USE COPY MODIFY MERGE PUBLISH DISTRIBUTE SUBLICENSE AND OR SELL
COPIES of the Software and to permit persons to whom the Software is
furnished to do so subject to the following conditions The above
copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software
'''.split()
assert expected_qtext == qtext.split()
expected_itext = u'''
Permission is hereby granted free of charge to any person obtaining a
copy of this software and associated documentation files the Software to
deal in the Software without restriction including without limitation
the rights to use copy modify merge publish distribute sublicense and or
sell copies of the Software and to permit persons to whom the Software
is furnished to do so subject to the following conditions The above
copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software
'''.split()
assert expected_itext == itext.split()
def test_match_with_surrounding_junk_should_return_an_exact_match(self):
tf1 = self.get_test_loc('detect/mit/mit.c')
ftr = Rule(text_file=tf1, licenses=['mit'])
idx = index.LicenseIndex([ftr])
query_loc = self.get_test_loc('detect/mit/mit4.c')
matches = idx.match(query_loc)
assert len(matches) == 1
match = matches[0]
qtext, itext = get_texts(match, location=query_loc, idx=idx)
expected_qtext = u'''
Permission [add] [text] is hereby granted free of charge to any person
obtaining a copy of this software and associated documentation files the
Software to deal in the Software without restriction including without
limitation the rights to use copy modify merge publish distribute
sublicense and or sell copies of the Software and to permit persons to
whom the Software is furnished to do so subject to the following
conditions The above copyright [add] [text] notice and this permission
notice shall be included in all copies or substantial portions of the
Software
'''.split()
assert expected_qtext == qtext.split()
expected_itext = u'''
Permission is hereby granted free of charge to any person obtaining a
copy of this software and associated documentation files the Software to
deal in the Software without restriction including without limitation the
rights to use copy modify merge publish distribute sublicense and or sell
copies of the Software and to permit persons to whom the Software is
furnished to do so subject to the following conditions The above
copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software
'''.split()
assert expected_itext == itext.split()
assert Span(0, 86) == match.qspan
assert Span(0, 86) == match.ispan
assert 100 == match.score()
def test_match_can_match_approximately(self):
rule_file = self.get_test_loc('approx/mit/mit.c')
rule = Rule(text_file=rule_file, licenses=['mit'])
idx = index.LicenseIndex([rule])
query_doc = self.get_test_loc('approx/mit/mit4.c')
matches = idx.match(query_doc)
assert 2 == len(matches)
m1 = matches[0]
m2 = matches[1]
assert rule == m1.rule
assert rule == m2.rule
assert 100 == m1.coverage()
assert 100 == m2.coverage()
assert 100 == m1.score()
assert 100 == m2.score()
def test_match_return_correct_positions_with_short_index_and_queries(self):
idx = index.LicenseIndex([Rule(_text='MIT License', licenses=['mit'])])
matches = idx.match(query_string='MIT License')
assert 1 == len(matches)
assert {'_tst_11_0': {'mit': [0]}} == idx.to_dict()
qtext, itext = get_texts(matches[0], query_string='MIT License', idx=idx)
assert 'MIT License' == qtext
assert 'MIT License' == itext
assert Span(0, 1) == matches[0].qspan
assert Span(0, 1) == matches[0].ispan
matches = idx.match(query_string='MIT MIT License')
assert 1 == len(matches)
qtext, itext = get_texts(matches[0], query_string='MIT MIT License', idx=idx)
assert 'MIT License' == qtext
assert 'MIT License' == itext
assert Span(1, 2) == matches[0].qspan
assert Span(0, 1) == matches[0].ispan
query_doc1 = 'do you think I am a mit license MIT License, yes, I think so'
# # 0 1 2 3
matches = idx.match(query_string=query_doc1)
assert 2 == len(matches)
qtext, itext = get_texts(matches[0], query_string=query_doc1, idx=idx)
assert 'mit license' == qtext
assert 'MIT License' == itext
assert Span(0, 1) == matches[0].qspan
assert Span(0, 1) == matches[0].ispan
qtext, itext = get_texts(matches[1], query_string=query_doc1, idx=idx)
assert 'MIT License' == qtext
assert 'MIT License' == itext
assert Span(2, 3) == matches[1].qspan
assert Span(0, 1) == matches[1].ispan
query_doc2 = '''do you think I am a mit license
MIT License
yes, I think so'''
matches = idx.match(query_string=query_doc2)
assert 2 == len(matches)
qtext, itext = get_texts(matches[0], query_string=query_doc2, idx=idx)
assert 'mit license' == qtext
assert 'MIT License' == itext
assert Span(0, 1) == matches[0].qspan
assert Span(0, 1) == matches[0].ispan
qtext, itext = get_texts(matches[1], query_string=query_doc2, idx=idx)
assert 'MIT License' == qtext
assert 'MIT License' == itext
assert Span(2, 3) == matches[1].qspan
assert Span(0, 1) == matches[1].ispan
def test_match_simple_rule(self):
tf1 = self.get_test_loc('detect/mit/t1.txt')
ftr = Rule(text_file=tf1, licenses=['bsd-original'])
idx = index.LicenseIndex([ftr])
query_doc = self.get_test_loc('detect/mit/t2.txt')
matches = idx.match(query_doc)
assert 1 == len(matches)
match = matches[0]
assert Span(0, 241) == match.qspan
assert Span(0, 241) == match.ispan
assert (1, 27,) == match.lines()
assert 100 == match.coverage()
assert 100 == match.score()
def test_match_works_with_special_characters_1(self):
test_file = self.get_test_loc('detect/specialcharacter/kerberos.txt')
idx = index.LicenseIndex([Rule(text_file=test_file, licenses=['kerberos'])])
assert 1 == len(idx.match(test_file))
def test_match_works_with_special_characters_2(self):
test_file = self.get_test_loc('detect/specialcharacter/kerberos1.txt')
idx = index.LicenseIndex([Rule(text_file=test_file, licenses=['kerberos'])])
assert 1 == len(idx.match(test_file))
def test_match_works_with_special_characters_3(self):
test_file = self.get_test_loc('detect/specialcharacter/kerberos2.txt')
idx = index.LicenseIndex([Rule(text_file=test_file, licenses=['kerberos'])])
assert 1 == len(idx.match(test_file))
def test_match_works_with_special_characters_4(self):
test_file = self.get_test_loc('detect/specialcharacter/kerberos3.txt')
idx = index.LicenseIndex([Rule(text_file=test_file, licenses=['kerberos'])])
assert 1 == len(idx.match(test_file))
def test_overlap_detection1(self):
# test this containment relationship between test and index licenses:
# * Index licenses:
# +-license 2 --------+
# | +-license 1 --+ |
# +-------------------+
#
# * License texts to detect:
# +- license 3 -----------+
# | +-license 2 --------+ |
# | | +-license 1 --+ | |
# | +-------------------+ |
# +-----------------------+
#
# +-license 4 --------+
# | +-license 1 --+ |
# +-------------------+
# setup index
license1 = '''Redistribution and use permitted.'''
license2 = '''Redistributions of source must retain copyright.
Redistribution and use permitted.
Redistributions in binary form is permitted.'''
license3 = '''
this license source
Redistributions of source must retain copyright.
Redistribution and use permitted.
Redistributions in binary form is permitted.
has a permitted license'''
license4 = '''My Redistributions is permitted.
Redistribution and use permitted.
Use is permitted too.'''
rule1 = Rule(_text=license1, licenses=['overlap'])
rule2 = Rule(_text=license2, licenses=['overlap'])
rule3 = Rule(_text=license3, licenses=['overlap'])
rule4 = Rule(_text=license4, licenses=['overlap'])
idx = index.LicenseIndex([rule1, rule2, rule3, rule4])
querys = 'Redistribution and use bla permitted.'
# test : license1 is in the index and contains no other rule. should return rule1 at exact coverage.
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
assert Span(0, 3) == match.qspan
assert rule1 == match.rule
qtext, _itext = get_texts(match, query_string=querys, idx=idx)
assert 'Redistribution and use [bla] permitted' == qtext
def test_overlap_detection2(self):
# test this containment relationship between test and index licenses:
# * Index licenses:
# +-license 2 --------+
# | +-license 1 --+ |
# +-------------------+
# setup index
license1 = '''Redistribution and use permitted.'''
license2 = '''Redistributions of source must retain copyright.
Redistribution and use permitted.
Redistributions in binary form is permitted.'''
rule1 = Rule(_text=license1, licenses=['overlap'])
rule2 = Rule(_text=license2, licenses=['overlap'])
idx = index.LicenseIndex([rule1, rule2])
# test : license2 contains license1, but the query text only matches
# license1: rule1 is returned as the best match
querys = 'Redistribution and use bla permitted.'
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
assert rule1 == match.rule
qtext, _itext = get_texts(match, query_string=querys, idx=idx)
assert 'Redistribution and use [bla] permitted' == qtext
def test_overlap_detection2_exact(self):
# test this containment relationship between test and index licenses:
# * Index licenses:
# +-license 2 --------+
# | +-license 1 --+ |
# +-------------------+
# setup index
license1 = '''Redistribution and use permitted.'''
license2 = '''Redistributions of source must retain copyright.
Redistribution and use permitted.
Redistributions in binary form is permitted.'''
rule1 = Rule(_text=license1, licenses=['overlap'])
rule2 = Rule(_text=license2, licenses=['overlap'])
idx = index.LicenseIndex([rule1, rule2])
# test : license2 contains license1, but the query text only matches
# license1: rule1 is returned as the best match
querys = 'Redistribution and use bla permitted.'
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
assert rule1 == match.rule
qtext, _itext = get_texts(match, query_string=querys, idx=idx)
assert 'Redistribution and use [bla] permitted' == qtext
def test_overlap_detection3(self):
# test this containment relationship between test and index licenses:
# * Index licenses:
# +-license 2 --------+
# | +-license 1 --+ |
# +-------------------+
#
# * License texts to detect:
# +- license 3 -----------+
# | +-license 2 --------+ |
# | | +-license 1 --+ | |
# | +-------------------+ |
# +-----------------------+
#
# setup index
license1 = '''Redistribution and use permitted.'''
license2 = '''Redistributions of source must retain copyright.
Redistribution and use permitted.
Redistributions in binary form is permitted.'''
rule1 = Rule(_text=license1, licenses=['overlap'])
rule2 = Rule(_text=license2, licenses=['overlap'])
idx = index.LicenseIndex([rule1, rule2])
querys = '''My source.
Redistributions of source must retain copyright.
Redistribution and use permitted.
Redistributions in binary form is permitted.
My code.'''
# test : querys contains license2 that contains license1: return license2 as exact coverage
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
assert rule2 == match.rule
qtext, _itext = get_texts(match, query_string=querys, idx=idx)
expected = '''
Redistributions of source must retain copyright
Redistribution and use permitted
Redistributions in binary form is permitted'''.split()
assert expected == qtext.split()
def test_overlap_detection4(self):
# test this containment relationship between test and index licenses:
# * Index licenses:
# +-license 2 --------+
# | +-license 1 --+ |
# +-------------------+
#
# +-license 4 --------+
# | +-license 1 --+ |
# +-------------------+
# setup index
license1 = '''Redistribution and use permitted.'''
license2 = '''Redistributions of source must retain copyright.
Redistribution and use permitted.
Redistributions in binary form is permitted.'''
rule1 = Rule(_text=license1, licenses=['overlap'])
rule2 = Rule(_text=license2, licenses=['overlap'])
idx = index.LicenseIndex([rule1, rule2])
querys = '''My source.
Redistribution and use permitted.
My code.'''
# test : querys contains license1: return license1 as exact coverage
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
assert rule1 == match.rule
qtext, _itext = get_texts(match, query_string=querys, idx=idx)
assert 'Redistribution and use permitted' == qtext
def test_overlap_detection5(self):
# test this containment relationship between test and index licenses:
# * Index licenses:
# +-license 2 --------+
# | +-license 1 --+ |
# +-------------------+
#
# +-license 4 --------+
# | +-license 1 --+ |
# +-------------------+
# setup index
license1 = '''Redistribution and use permitted for MIT license.'''
license2 = '''Redistributions of source must retain copyright.
Redistribution and use permitted for MIT license.
Redistributions in binary form is permitted.'''
rule1 = Rule(_text=license1, licenses=['overlap'])
rule2 = Rule(_text=license2, licenses=['overlap'])
idx = index.LicenseIndex([rule1, rule2])
querys = '''My source.
Redistribution and use permitted for MIT license.
My code.'''
# test : querys contains license1: return license1 as exact coverage
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
assert rule1 == match.rule
qtext, _itext = get_texts(match, query_string=querys, idx=idx)
assert 'Redistribution and use permitted for MIT license' == qtext
def test_fulltext_detection_works_with_partial_overlap_from_location(self):
test_doc = self.get_test_loc('detect/templates/license3.txt')
idx = index.LicenseIndex([Rule(text_file=test_doc, licenses=['mylicense'])])
query_loc = self.get_test_loc('detect/templates/license4.txt')
matches = idx.match(query_loc)
assert 1 == len(matches)
match = matches[0]
assert Span(0, 41) == match.qspan
assert Span(0, 41) == match.ispan
assert 100 == match.coverage()
assert 100 == match.score()
qtext, _itext = get_texts(match, location=query_loc, idx=idx)
expected = '''
is free software you can redistribute it and or modify it under the terms
of the GNU Lesser General Public License as published by the Free
Software Foundation either version 2 1 of the License or at your option
any later version
'''.split()
assert expected == qtext.split()
class TestIndexMatchWithTemplate(FileBasedTesting):
test_data_dir = TEST_DATA_DIR
def test_match_can_match_with_plain_rule_simple(self):
tf1_text = u'''X11 License
Copyright (C) 1996 X Consortium
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above copyright
notice and this permission notice shall be included in all copies or
substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS",
WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE X CONSORTIUM BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE. Except as contained in this notice, the
name of the X Consortium shall not be used in advertising or otherwise to
promote the sale, use or other dealings in this Software without prior
written authorization from the X Consortium. X Window System is a trademark
of X Consortium, Inc.
'''
rule = Rule(_text=tf1_text, licenses=['x-consortium'])
idx = index.LicenseIndex([rule])
query_loc = self.get_test_loc('detect/simple_detection/x11-xconsortium_text.txt')
matches = idx.match(query_loc)
assert 1 == len(matches)
match = matches[0]
assert Span(0, 216) == match.qspan
def test_match_can_match_with_plain_rule_simple2(self):
rule_text = u'''X11 License
Copyright (C) 1996 X Consortium
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above copyright
notice and this permission notice shall be included in all copies or
substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS",
WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE X CONSORTIUM BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE. Except as contained in this notice, the
name of the X Consortium shall not be used in advertising or otherwise to
promote the sale, use or other dealings in this Software without prior
written authorization from the X Consortium. X Window System is a trademark
of X Consortium, Inc.
'''
rule = Rule(_text=rule_text, licenses=['x-consortium'])
idx = index.LicenseIndex([rule])
query_loc = self.get_test_loc('detect/simple_detection/x11-xconsortium_text.txt')
matches = idx.match(location=query_loc)
assert 1 == len(matches)
expected_qtext = u'''
X11 License Copyright C 1996 X Consortium Permission is hereby granted free
of charge to any person obtaining a copy of this software and associated
documentation files the Software to deal in the Software without restriction
including without limitation the rights to use copy modify merge publish
distribute sublicense and or sell copies of the Software and to permit
persons to whom the Software is furnished to do so subject to the following
conditions The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software THE SOFTWARE
IS PROVIDED AS IS WITHOUT WARRANTY OF ANY KIND EXPRESS OR IMPLIED INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT IN NO EVENT SHALL THE X CONSORTIUM BE LIABLE FOR
ANY CLAIM DAMAGES OR OTHER LIABILITY WHETHER IN AN ACTION OF CONTRACT TORT OR
OTHERWISE ARISING FROM OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE Except as contained in this notice the name
of the X Consortium shall not be used in advertising or otherwise to promote
the sale use or other dealings in this Software without prior written
authorization from the X Consortium X Window System is a trademark of X
Consortium Inc
'''.split()
match = matches[0]
qtext, _itext = get_texts(match, location=query_loc, idx=idx)
assert expected_qtext == qtext.split()
def test_match_can_match_with_simple_rule_template2(self):
rule_text = u'''
IN NO EVENT SHALL THE {{X CONSORTIUM}}
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
rule = Rule(_text=rule_text, licenses=['x-consortium'])
idx = index.LicenseIndex([rule])
query_string = u'''
IN NO EVENT SHALL THE Y CORP
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
matches = idx.match(query_string=query_string)
assert 1 == len(matches)
match = matches[0]
qtext, itext = get_texts(match, query_string=query_string, idx=idx)
expected_qtokens = u'''
IN NO EVENT SHALL THE [Y] [CORP] BE LIABLE FOR ANY CLAIM DAMAGES OR OTHER
LIABILITY WHETHER IN AN ACTION OF CONTRACT TORT OR OTHERWISE ARISING FROM OUT
OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE
'''.split()
expected_itokens = u'''
IN NO EVENT SHALL THE BE LIABLE FOR ANY CLAIM DAMAGES OR OTHER LIABILITY
WHETHER IN AN ACTION OF CONTRACT TORT OR OTHERWISE ARISING FROM OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
'''.split()
assert expected_qtokens == qtext.split()
assert expected_itokens == itext.split()
def test_match_can_match_with_rule_template_with_inter_gap_of_2(self):
# in this template text there are only 2 tokens between the two templates markers
test_text = u'''Redistributions in binary form must
{{}} reproduce the {{}}above copyright notice'''
rule = Rule(_text=test_text, licenses=['mylicense'])
idx = index.LicenseIndex([rule])
querys = u'''Redistributions in binary form must nexB company
reproduce the word for word above copyright notice.'''
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
assert 100 == match.coverage()
assert 50 == match.score()
assert Span(0, 9) == match.qspan
assert Span(0, 9) == match.ispan
def test_match_can_match_with_rule_template_with_inter_gap_of_3(self):
# in this template there are 3 tokens between the two template markers
test_text = u'''Redistributions in binary form must
{{}} reproduce the stipulated {{}}above copyright notice'''
rule = Rule(_text=test_text, licenses=['mylicense'])
idx = index.LicenseIndex([rule])
querys = u'''Redistributions in binary form must nexB company
reproduce the stipulated word for word above copyright notice.'''
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
assert 100 == match.coverage()
assert 55 == match.score()
assert Span(0, 10) == match.qspan
assert Span(0, 10) == match.ispan
def test_match_can_match_with_rule_template_with_inter_gap_of_4(self):
# in this template there are 4 tokens between the two templates markers
test_text = u'''Redistributions in binary form must
{{}} reproduce as is stipulated {{}}above copyright notice'''
rule = Rule(_text=test_text, licenses=['mylicense'])
idx = index.LicenseIndex([rule])
querys = u'''Redistributions in binary form must nexB company
reproduce as is stipulated the word for word above copyright notice.'''
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
assert Span(0, 11) == match.qspan
assert Span(0, 11) == match.ispan
def test_match_can_match_with_rule_template_for_public_domain(self):
test_text = '''
I hereby abandon any property rights to {{SAX 2.0 (the Simple API for
XML)}}, and release all of {{the SAX 2.0 }} source code, compiled code,
and documentation contained in this distribution into the Public Domain.
'''
rule = Rule(_text=test_text, licenses=['public-domain'])
idx = index.LicenseIndex([rule])
querys = '''
SAX2 is Free!
I hereby abandon any property rights to SAX 2.0 (the Simple API for
XML), and release all of the SAX 2.0 source code, compiled code, and
documentation contained in this distribution into the Public Domain. SAX
comes with NO WARRANTY or guarantee of fitness for any purpose.
SAX2 is Free!
'''
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
qtext, itext = get_texts(match, query_string=querys, idx=idx)
expected_qtext = u'''
I hereby abandon any property rights to [SAX] [2] [0] <the> [Simple] [API] [for] [XML]
<and> <release> <all> <of> <the> [SAX] [2] [0]
source code compiled code and documentation contained in this distribution
into the Public Domain
'''.split()
assert expected_qtext == qtext.split()
expected_itext = u'''
I hereby abandon any property rights to
<and> <release> <all> <of>
source code compiled code and documentation contained in this distribution
into the Public Domain
'''.split()
assert expected_itext == itext.split()
assert 80 < match.coverage()
assert 84 == match.score()
assert Span(0, 6) | Span(13, 26) == match.qspan
assert Span(0, 6) | Span(11, 24) == match.ispan
def test_match_can_match_with_rule_template_with_gap_near_start_with_few_tokens_before(self):
# failed when a gapped token starts at a beginning of rule with few tokens before
test_file = self.get_test_loc('detect/templates/license7.txt')
rule = Rule(text_file=test_file, licenses=['lic'])
idx = index.LicenseIndex([rule])
qloc = self.get_test_loc('detect/templates/license8.txt')
matches = idx.match(qloc)
assert 1 == len(matches)
match = matches[0]
expected_qtokens = u"""
All Rights Reserved Redistribution and use of this software and associated
documentation Software with or without modification are permitted provided
that the following conditions are met
1 Redistributions of source code must retain copyright statements and notices
Redistributions must also contain a copy of this document
2 Redistributions in binary form must reproduce the above copyright notice
this list of conditions and the following disclaimer in the documentation and
or other materials provided with the distribution
3 The name [groovy] must not be used to endorse or promote products derived
from this Software without prior written permission of <The> [Codehaus] For
written permission please contact [info] [codehaus] [org]
4 Products derived from this Software may not be called [groovy] nor may
[groovy] appear in their names without prior written permission of <The>
[Codehaus]
[groovy] is a registered trademark of <The> [Codehaus]
5 Due credit should be given to <The> [Codehaus]
[http] [groovy] [codehaus] [org]
<THIS> <SOFTWARE> <IS> <PROVIDED> <BY> <THE> [CODEHAUS] <AND> <CONTRIBUTORS>
AS IS AND ANY EXPRESSED OR IMPLIED WARRANTIES INCLUDING BUT NOT LIMITED TO
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED IN NO EVENT SHALL <THE> [CODEHAUS] OR ITS
CONTRIBUTORS BE LIABLE FOR ANY DIRECT INDIRECT INCIDENTAL SPECIAL EXEMPLARY
OR CONSEQUENTIAL DAMAGES INCLUDING BUT NOT LIMITED TO PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES LOSS OF USE DATA OR PROFITS OR BUSINESS
INTERRUPTION HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY WHETHER IN
CONTRACT STRICT LIABILITY OR TORT INCLUDING NEGLIGENCE OR OTHERWISE ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE
""".split()
expected_itokens = u''' All Rights Reserved Redistribution and use of this
software and associated documentation Software with or without modification
are permitted provided that the following conditions are met
1 Redistributions of source code must retain copyright statements and notices
Redistributions must also contain a copy of this document
2 Redistributions in binary form must reproduce the above copyright notice
this list of conditions and the following disclaimer in the documentation and
or other materials provided with the distribution
3 The name must not be used to endorse or promote products derived from this
Software without prior written permission of For written permission please
contact
4 Products derived from this Software may not be called nor may appear in
their names without prior written permission of is a registered trademark of
5 Due credit should be given to
<THIS> <SOFTWARE> <IS> <PROVIDED> <BY>
AS IS AND ANY EXPRESSED OR IMPLIED WARRANTIES INCLUDING BUT NOT LIMITED TO
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED IN NO EVENT SHALL OR ITS CONTRIBUTORS BE LIABLE FOR
ANY DIRECT INDIRECT INCIDENTAL SPECIAL EXEMPLARY OR CONSEQUENTIAL DAMAGES
INCLUDING BUT NOT LIMITED TO PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES LOSS
OF USE DATA OR PROFITS OR BUSINESS INTERRUPTION HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY WHETHER IN CONTRACT STRICT LIABILITY OR TORT INCLUDING
NEGLIGENCE OR OTHERWISE ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
'''.split()
qtext, itext = get_texts(match, location=qloc, idx=idx)
assert expected_qtokens == qtext.split()
assert expected_itokens == itext.split()
assert 97 < match.coverage()
assert 97 < match.score()
expected = Span(2, 98) | Span(100, 125) | Span(127, 131) | Span(133, 139) | Span(149, 178) | Span(180, 253)
assert expected == match.qspan
assert Span(1, 135) | Span(141, 244) == match.ispan
def test_match_can_match_with_index_built_from_rule_directory_with_sun_bcls(self):
rule_dir = self.get_test_loc('detect/rule_template/rules')
idx = index.LicenseIndex(models.load_rules(rule_dir))
# at line 151 the query has an extra "Software" word inserted to avoid hash matching
query_loc = self.get_test_loc('detect/rule_template/query.txt')
matches = idx.match(location=query_loc)
assert 1 == len(matches)
match = matches[0]
assert Span(0, 957) | Span(959, 1756) == match.qspan
assert match_seq.MATCH_SEQ == match.matcher
class TestMatchAccuracyWithFullIndex(FileBasedTesting):
test_data_dir = TEST_DATA_DIR
def check_position(self, test_path, expected, with_span=True, print_results=False):
"""
Check license detection in file or folder against expected result.
Expected is a list of (license, lines span, qspan span) tuples.
"""
test_location = self.get_test_loc(test_path)
results = []
# FULL INDEX!!
idx = index.get_index()
matches = idx.match(test_location)
for match in matches:
for detected in match.rule.licenses:
if print_results:
print()
print(match)
print_matched_texts(match, location=test_location, idx=idx)
results.append((detected, match.lines(), with_span and match.qspan or None))
assert expected == results
def test_match_has_correct_positions_basic(self):
idx = index.get_index()
querys = u'''Licensed under the GNU General Public License (GPL).
Licensed under the GNU General Public License (GPL).
Licensed under the GNU General Public License (GPL).'''
matches = idx.match(query_string=querys)
rule = [r for r in idx.rules_by_rid if r.identifier == 'gpl_69.RULE'][0]
m1 = LicenseMatch(rule=rule, qspan=Span(0, 7), ispan=Span(0, 7), start_line=1, end_line=1)
m2 = LicenseMatch(rule=rule, qspan=Span(8, 15), ispan=Span(0, 7), start_line=2, end_line=2)
m3 = LicenseMatch(rule=rule, qspan=Span(16, 23), ispan=Span(0, 7), start_line=3, end_line=3)
assert [m1, m2, m3] == matches
def test_match_has_correct_line_positions_for_query_with_repeats(self):
expected = [
# licenses, match.lines(), qtext,
([u'apache-2.0'], (1, 2), u'The Apache Software License Version 2 0 http www apache org licenses LICENSE 2 0 txt'),
([u'apache-2.0'], (3, 4), u'The Apache Software License Version 2 0 http www apache org licenses LICENSE 2 0 txt'),
([u'apache-2.0'], (5, 6), u'The Apache Software License Version 2 0 http www apache org licenses LICENSE 2 0 txt'),
([u'apache-2.0'], (7, 8), u'The Apache Software License Version 2 0 http www apache org licenses LICENSE 2 0 txt'),
([u'apache-2.0'], (9, 10), u'The Apache Software License Version 2 0 http www apache org licenses LICENSE 2 0 txt'),
]
test_path = 'positions/license1.txt'
test_location = self.get_test_loc(test_path)
idx = index.get_index()
matches = idx.match(test_location)
for i, match in enumerate(matches):
ex_lics, ex_lines, ex_qtext = expected[i]
qtext, _itext = get_texts(match, location=test_location, idx=idx)
try:
assert ex_lics == match.rule.licenses
assert ex_lines == match.lines()
assert ex_qtext == qtext
except AssertionError:
assert expected[i] == (match.rule.licenses, match.lines(), qtext)
def test_match_does_not_return_spurious_match(self):
expected = []
self.check_position('positions/license2.txt', expected)
def test_match_has_correct_line_positions_for_repeats(self):
# we had a weird error where the lines were not computed correctly
# when we had more than one file detected at a time
expected = [
# detected, match.lines(), match.qspan,
(u'apache-2.0', (1, 2), Span(0, 15)),
(u'apache-2.0', (3, 4), Span(16, 31)),
(u'apache-2.0', (5, 6), Span(32, 47)),
(u'apache-2.0', (7, 8), Span(48, 63)),
(u'apache-2.0', (9, 10), Span(64, 79)),
]
self.check_position('positions/license3.txt', expected)
def test_match_works_for_apache_rule(self):
idx = index.get_index()
querys = u'''I am not a license.
The Apache Software License, Version 2.0
http://www.apache.org/licenses/LICENSE-2.0.txt
'''
matches = idx.match(query_string=querys)
assert 1 == len(matches)
match = matches[0]
assert 'apache-2.0_8.RULE' == match.rule.identifier
assert match_aho.MATCH_AHO_EXACT == match.matcher
qtext, _itext = get_texts(match, query_string=querys, idx=idx)
assert u'The Apache Software License Version 2 0 http www apache org licenses LICENSE 2 0 txt' == qtext
assert (3, 4) == match.lines()
def test_match_does_not_detect_spurrious_short_apache_rule(self):
idx = index.get_index()
querys = u'''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
<title>Apache log4j 1.2 - Continuous Integration</title>
'''
matches = idx.match(query_string=querys)
assert [] == matches
def test_match_handles_negative_rules_and_does_not_match_negative_regions_properly(self):
# note: this test relies on the negative rule: not-a-license_busybox_2.RULE
# with this text:
# "libbusybox is GPL, not LGPL, and exports no stable API that might act as a copyright barrier."
# and relies on the short rules that detect GPL and LGPL
idx = index.get_index()
# lines 3 and 4 should NOT be part of any matches
# they should match the negative "not-a-license_busybox_2.RULE"
negative_lines_not_to_match = 3, 4
querys = u'''
licensed under the LGPL license
libbusybox is GPL, not LGPL, and exports no stable API
that might act as a copyright barrier.
for the license
license: dual BSD/GPL
'''
matches = idx.match(query_string=querys)
for match in matches:
for line in negative_lines_not_to_match:
assert line not in match.lines()
def test_match_has_correct_line_positions_in_automake_perl_file(self):
# reported as https://github.com/nexB/scancode-toolkit/issues/88
expected = [
# detected, match.lines(), match.qspan,
(u'gpl-2.0-plus', (12, 25), Span(48, 159)),
(u'fsf-mit', (231, 238), Span(950, 1014)),
(u'free-unknown', (306, 307), Span(1291, 1314))
]
self.check_position('positions/automake.pl', expected)
class TestMatchBinariesWithFullIndex(FileBasedTesting):
test_data_dir = TEST_DATA_DIR
def test_match_in_binary_lkms_1(self):
idx = index.get_index()
qloc = self.get_test_loc('positions/ath_pci.ko')
matches = idx.match(location=qloc)
assert 1 == len(matches)
match = matches[0]
assert ['bsd-new', 'gpl-2.0'] == match.rule.licenses
qtext, itext = get_texts(match, location=qloc, idx=idx)
assert 'license Dual BSD GPL' == qtext
assert 'license Dual BSD GPL' == itext
def test_match_in_binary_lkms_2(self):
idx = index.get_index()
qloc = self.get_test_loc('positions/eeepc_acpi.ko')
matches = idx.match(location=qloc)
assert 1 == len(matches)
match = matches[0]
assert ['gpl'] == match.rule.licenses
assert match.ispan == Span(0, 1)
qtext, itext = get_texts(match, location=qloc, idx=idx)
assert 'license GPL' == qtext
assert 'License GPL' == itext
def test_match_in_binary_lkms_3(self):
idx = index.get_index()
qloc = self.get_test_loc('positions/wlan_xauth.ko')
matches = idx.match(location=qloc)
assert 1 == len(matches)
match = matches[0]
assert ['bsd-new', 'gpl-2.0'] == match.rule.licenses
assert 100 == match.coverage()
assert 20 == match.score()
qtext, itext = get_texts(match, location=qloc, idx=idx)
assert 'license Dual BSD GPL' == qtext
assert 'license Dual BSD GPL' == itext
assert Span(0, 3) == match.ispan
@skip('Needs review')
class TestToFix(FileBasedTesting):
test_data_dir = TEST_DATA_DIR
def test_detection_in_complex_json(self):
# NOTE: this test cannot pass as we do not have several of the licenses
# listed in this JSON
test_file = self.get_test_loc('detect/json/all.json')
import json
with open(test_file) as jsonfile:
    item_map = json.load(jsonfile)
for item in item_map:
    itemid = item_map[item]['id']
    content = itemid + ' \n ' + item_map[item]['url'] + ' \n ' + item_map[item]['title']
tmp_file = self.get_temp_file()
fh = open(tmp_file, 'w')
fh.write(content)
fh.close()
|
yasharmaster/scancode-toolkit
|
tests/licensedcode/test_detect.py
|
Python
|
apache-2.0
| 47,823
|
[
"VisIt"
] |
860cdeac874f037774638adf1376c3c510d0e22fd341d9a85e3e44143d6e249d
|
import os
import re
import sys
import numpy
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
from Cython.Build import cythonize
libraries = []
if os.name == "posix":
libraries.append("m")
include_dirs = [
"../C",
numpy.get_include(),
]
ext = cythonize([
Extension("ttvfaster._ttvfaster",
sources=["../C/ttvfaster.c", "ttvfaster/_ttvfaster.pyx"],
libraries=libraries, include_dirs=include_dirs)
])
setup(
name="ttvfaster",
version="0.0.1",
author="Eric Agol, Kat Deck, Daniel Foreman-Mackey",
url="https://github.com/ericagol/TTVFaster",
license="MIT",
packages=["ttvfaster", ],
ext_modules=ext,
# description="Blazingly fast Gaussian Processes for regression.",
# long_description=open("README.rst").read(),
classifiers=[
# "Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
],
)
|
ericagol/TTVFaster
|
Python/setup.py
|
Python
|
mit
| 1,132
|
[
"Gaussian"
] |
292106538d0ba2fd692df98cc4712368d70753145122b1f135c4fce0f106fa8c
|
from __future__ import division
from sklearn.model_selection import KFold
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
#from sklearn.metrics import classification_report
import sklearn.metrics as mtrx
from sklearn.neighbors import KNeighborsClassifier as KNC
from sklearn.ensemble import RandomForestClassifier as RF
import swap
import machine_utils as ml
#import metrics as mtrx
from metrics import compute_binary_metrics
from optparse import OptionParser
from astropy.table import Table
import pdb
import numpy as np
import datetime as dt
import os, subprocess, sys
import cPickle
'''
Workflow:
access morphology database
accept labels/data for training
accept labels/data for testing
"whiten" data (normalize)
{reduce dimensions} (optional)
train the machine classifier
run machine classifier on test sample
'''
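# A rough, illustrative sketch of the workflow above (not what this script
# actually runs: MachineClassifier() below uses GridSearchCV over a random
# forest on morphology features; StandardScaler here only stands in for the
# "whiten" step):
#
#   from sklearn.preprocessing import StandardScaler
#   scaler = StandardScaler().fit(train_features)                    # "whiten" (normalize)
#   model = RF(n_estimators=30).fit(scaler.transform(train_features), train_labels)
#   predictions = model.predict(scaler.transform(test_features))     # apply to test sample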
def MachineClassifier(options, args):
"""
NAME
MachineClassifier.py
PURPOSE
Machine learning component of Galaxy Zoo Express
Read in a training sample generated by human users (which have
previously been analyzed by SWAP).
Learn on the training sample and monitor progress.
Once "fully trained", apply learned model to test sample.
COMMENTS
Lots I'm sure.
FLAGS
-h Print this message
-c config file name
"""
#-----------------------------------------------------------------------
# LOAD CONFIG FILE PARAMETERS
#-----------------------------------------------------------------------
# Check for config file in array args:
if (len(args) >= 1) or (options.configfile):
if args: config = args[0]
elif options.configfile: config = options.configfile
print swap.doubledashedline
print swap.ML_hello
print swap.doubledashedline
print "ML: taking instructions from",config
else:
print MachineClassifier.__doc__
return
machine_sim_directory = 'sims_Machine/redo_with_circular_morphs/'
tonights = swap.Configuration(config)
# Read the pickled random state file
random_file = open(tonights.parameters['random_file'],"r");
random_state = cPickle.load(random_file);
random_file.close();
np.random.set_state(random_state)
time = tonights.parameters['start']
# Get the machine threshold (to make retirement decisions)
swap_thresholds = {}
swap_thresholds['detection'] = tonights.parameters['detection_threshold']
swap_thresholds['rejection'] = tonights.parameters['rejection_threshold']
threshold = tonights.parameters['machine_threshold']
prior = tonights.parameters['prior']
# Get list of evaluation metrics and criteria
eval_metrics = tonights.parameters['evaluation_metrics']
# How much cross-validation should we do?
cv = tonights.parameters['cross_validation']
survey = tonights.parameters['survey']
# To generate training labels based on the subject probability,
# we need to know what should be considered the positive label:
# i.e., GZ2 has labels (in metadatafile) Smooth = 1, Feat = 0
# Doing a Smooth or Not run, the positive label is 1
# Doing a Featured or Not run, the positive label is 0
pos_label = tonights.parameters['positive_label']
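# e.g. in a "Smooth or Not" run (pos_label = 1), a subject whose SWAP
# probability exceeds the prior gets training label 1, otherwise 0
# (see the train_labels construction further below).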
#----------------------------------------------------------------------
# read in the metadata for all subjects
storage = swap.read_pickle(tonights.parameters['metadatafile'], 'metadata')
# 11TH HOUR QUICK FIX CUZ I FUCKED UP. MB 10/27/16
if 'GZ2_raw_combo' not in storage.subjects.colnames:
gz2_metadata = Table.read('metadata_ground_truth_labels.fits')
storage.subjects['GZ2_raw_combo'] = gz2_metadata['GZ2_raw_combo']
swap.write_pickle(storage, tonights.parameters['metadatafile'])
subjects = storage.subjects
#----------------------------------------------------------------------
# read in the PROJECT COLLECTION -- (shared between SWAP/Machine)
#sample = swap.read_pickle(tonights.parameters['samplefile'],'collection')
# read in or create the ML bureau for machine agents (history for Machines)
MLbureau = swap.read_pickle(tonights.parameters['MLbureaufile'],'bureau')
#-----------------------------------------------------------------------
# FETCH TRAINING & VALIDATION SAMPLES
#-----------------------------------------------------------------------
train_sample = storage.fetch_subsample(sample_type='train',
class_label='GZ2_raw_combo')
""" Notes about the training sample:
# this will select only those which have my morphology measured for them
# AND which have "ground truth" according to GZ2
# Eventually we could open this up to include the ~10k that aren't in the
# GZ Main Sample but I think, for now, we should reduce ourselves to this
# stricter sample so that we always have back-up "truth" for each galaxy.
"""
try:
train_meta, train_features = ml.extract_features(train_sample,
keys=['M20_corr', 'C_corr', 'E', 'A_corr', 'G_corr'])
original_length = len(train_meta)
except TypeError:
print "ML: can't extract features from subsample."
print "ML: Exiting MachineClassifier.py"
sys.exit()
else:
# TODO: consider making this part of SWAP's duties?
# 5/18/16: Only use those subjects which are no longer on the prior
off_the_fence = np.where(train_meta['SWAP_prob']!=prior)
train_meta = train_meta[off_the_fence]
train_features = train_features[off_the_fence]
train_labels = np.array([pos_label if p > prior else 1-pos_label
for p in train_meta['SWAP_prob']])
shortened_length = len(train_meta)
print "ML: found a training sample of %i subjects"%shortened_length
removed = original_length - shortened_length
print "ML: %i subjects removed to create balanced training sample"%removed
valid_sample = storage.fetch_subsample(sample_type='valid',
class_label='Expert_label')
try:
valid_meta, valid_features = ml.extract_features(valid_sample,
keys=['M20_corr', 'C_corr', 'E', 'A_corr', 'G_corr'])
except:
print "ML: there are no subjects with the label 'valid'!"
else:
valid_labels = valid_meta['Expert_label'].filled()
print "ML: found a validation sample of %i subjects"%len(valid_meta)
# ---------------------------------------------------------------------
# Require a minimum size training sample [Be reasonable, my good man!]
# ---------------------------------------------------------------------
if len(train_sample) < 10000:
print "ML: training sample is too small to be worth anything."
print "ML: Exiting MachineClassifier.py"
sys.exit()
else:
print "ML: training sample is large enough to give it a shot."
# TODO: LOOP THROUGH DIFFERENT MACHINES?
# 5/12/16 -- no... need to make THIS a class and create multiple
# instances? Each one can be passed an instance of a machine?
# Machine can be trained to optimize different metrics
# (ACC, completeness, purity, etc. Have a list of acceptable ones.)
# Minimize a Loss function.
for metric in eval_metrics:
# REGISTER Machine Classifier
# Construct machine name --> Machine+Metric
machine = 'RF'
Name = machine+'_'+metric
# register an Agent for this Machine
try:
test = MLbureau.member[Name]
except:
MLbureau.member[Name] = swap.Agent_ML(Name, metric)
MLagent = MLbureau.member[Name]
#---------------------------------------------------------------
# TRAIN THE MACHINE; EVALUATE ON VALIDATION SAMPLE
#---------------------------------------------------------------
# Now we run the machine -- need cross validation on whatever size
# training sample we have ..
# Fixed until we build in other machine options
# Need to dynamically determine appropriate parameters...
#max_neighbors = get_max_neighbors(train_features, cv)
#n_neighbors = np.arange(1, (cv-1)*max_neighbors/cv, 5, dtype=int)
#params = {'n_neighbors':n_neighbors,
# 'weights':('uniform','distance')}
num_features = train_features.shape[1]
min_features = int(round(np.sqrt(num_features)))
params = {'max_features':np.arange(min_features, num_features+1),
'max_depth':np.arange(2,16)}
# Create the model
# for "estimator=XXX" all you need is an instance of a machine --
# any scikit-learn machine will do. However, non-sklearn machines..
# That will be a bit trickier! (i.e. Phil's conv-nets)
general_model = GridSearchCV(estimator=RF(n_estimators=30),
param_grid=params, n_jobs=31,
error_score=0, scoring=metric, cv=cv)
# Train the model -- k-fold cross validation is embedded
print "ML: Searching the hyperparameter space for values that "\
"optimize the %s."%metric
trained_model = general_model.fit(train_features, train_labels)
MLagent.model = trained_model
# Test accuracy (metric of choice) on validation sample
score = trained_model.score(valid_features, valid_labels)
ratio = np.sum(train_labels==pos_label) / len(train_labels)
MLagent.record_training(model_described_by=
trained_model.best_estimator_,
with_params=trained_model.best_params_,
trained_on=len(train_features),
with_ratio=ratio,
at_time=time,
with_train_score=trained_model.best_score_,
and_valid_score=trained_model.score(
valid_features, valid_labels))
valid_prob_thresh = trained_model.predict_proba(valid_features)[:,pos_label]
fps, tps, thresh = mtrx.roc_curve(valid_labels,valid_prob_thresh, pos_label=pos_label)
metric_list = compute_binary_metrics(fps, tps)
ACC, TPR, FPR, FNR, TNR, PPV, FDR, FOR, NPV = metric_list
MLagent.record_validation(accuracy=ACC, recall=TPR, precision=PPV,
false_pos=FPR, completeness_f=TNR,
contamination_f=NPV)
#MLagent.plot_ROC()
# ---------------------------------------------------------------
# IF TRAINED MACHINE PREDICTS WELL ON VALIDATION ....
# ---------------------------------------------------------------
if MLagent.is_trained(metric) or MLagent.trained:
print "ML: %s has successfully trained and will be applied "\
"to the test sample."%Name
# Retrieve the test sample
test_sample = storage.fetch_subsample(sample_type='test',
class_label='GZ2_raw_combo')
""" Notes on test sample:
The test sample will, in real life, be those subjects for which
we don't have an answer a priori. However, for now, this sample
is how we will judge, in part, the performance of the overall
method. As such, we only include those subjects which have
GZ2 labels in the Main Sample.
"""
try:
test_meta, test_features = ml.extract_features(test_sample,
keys=['M20_corr', 'C_corr', 'E', 'A_corr', 'G_corr'])
except:
print "ML: there are no subjects with the label 'test'!"
print "ML: Either there is nothing more to do or there is a BIG mistake..."
else:
print "ML: found test sample of %i subjects"%len(test_meta)
#-----------------------------------------------------------
# APPLY MACHINE TO TEST SAMPLE
#-----------------------------------------------------------
predictions = MLagent.model.predict(test_features)
probabilities = MLagent.model.predict_proba(test_features)[:,pos_label]
print "ML: %s has finished predicting labels for the test "\
"sample."%Name
print "ML: Generating performance report on the test sample:"
test_labels = test_meta['GZ2_raw_combo'].filled()
print mtrx.classification_report(test_labels, predictions)
test_accuracy = mtrx.accuracy_score(test_labels,predictions)
test_precision = mtrx.precision_score(test_labels,predictions,pos_label=pos_label)
test_recall = mtrx.recall_score(test_labels,predictions,pos_label=pos_label)
MLagent.record_evaluation(accuracy_score=test_accuracy,
precision_score=test_precision,
recall_score=test_recall,
at_time=time)
# ----------------------------------------------------------
# Save the predictions and probabilities to a new pickle
test_meta['predictions'] = predictions
test_meta['machine_probability'] = probabilities
# If it hasn't been done already, save the current directory
# ---------------------------------------------------------------------
tonights.parameters['trunk'] = survey+'_'+tonights.parameters['start']
# This is the standard directory...
#tonights.parameters['dir'] = os.getcwd()+'/'+tonights.parameters['trunk']
# This is to put files into the sims_Machine/... directory.
tonights.parameters['dir'] = os.getcwd()
filename=tonights.parameters['dir']+'/'+tonights.parameters['trunk']+'_'+Name+'.fits'
test_meta.write(filename)
count=0
noSWAP=0
for sub, pred, prob in zip(test_meta, predictions, probabilities):
# IF MACHINE P >= THRESHOLD, INSERT INTO SWAP COLLECTION
# --------------------------------------------------------
if (prob >= threshold) or (1-prob >= threshold):
# Flip the set label in the metadata file --
# don't want to use this as a training sample!
idx = np.where(subjects['asset_id'] == sub['asset_id'])
storage.subjects['MLsample'][idx] = 'mclass'
storage.subjects['retired_date'][idx] = time
count+=1
print "MC: Machine classifed {0} subjects with >= 90% confidence".format(count)
print "ML: Of those, {0} had never been seen by SWAP".format(noSWAP)
tonights.parameters['trunk'] = survey+'_'+tonights.parameters['start']
tonights.parameters['dir'] = os.getcwd()
if not os.path.exists(tonights.parameters['dir']):
os.makedirs(tonights.parameters['dir'])
# Repickle all the shits
# -----------------------------------------------------------------------
if tonights.parameters['repickle']:
#new_samplefile = swap.get_new_filename(tonights.parameters,'collection')
#print "ML: saving SWAP subjects to "+new_samplefile
#swap.write_pickle(sample, new_samplefile)
#tonights.parameters['samplefile'] = new_samplefile
new_bureaufile=swap.get_new_filename(tonights.parameters,'bureau','ML')
print "ML: saving MLbureau to "+new_bureaufile
swap.write_pickle(MLbureau, new_bureaufile)
tonights.parameters['MLbureaufile'] = new_bureaufile
metadatafile = swap.get_new_filename(tonights.parameters,'metadata')
print "ML: saving metadata to "+metadatafile
swap.write_pickle(storage, metadatafile)
tonights.parameters['metadatafile'] = metadatafile
# UPDATE CONFIG FILE with pickle filenames, dir/trunk, and (maybe) new day
# ----------------------------------------------------------------------
configfile = config.replace('startup','update')
# Random_file needs updating, else we always start from the same random
# state when update.config is reread!
random_file = open(tonights.parameters['random_file'],"w");
random_state = np.random.get_state();
cPickle.dump(random_state,random_file);
random_file.close();
swap.write_config(configfile, tonights.parameters)
return
def get_max_neighbors(sample, cv_folds):
# when performing cross validation using a KNN classifier, the number of
# nearest neighbors MUST be less than the sample size.
# Depending on how many folds one wishes their CV to compute, this changes
# So! For the required number of folds, calculate the number of nearest
# neighbors which would be ONE less than the length of the sample size
# once the FULL size of the sample has been split into num_folds groups
# for cross validation.
# Furthermore, if we have a massively huge sample, we don't actually want
# to search the ENTIRE n_neighbors parameter space. Increasing the
# neighbors effectively smooths over the noise and we don't want to smooth
# TOO much. SO, return a capped value --
# Minimum sample size = 100 right now, so max neighbors == 99
cv_size = len(sample)*(1-1/cv_folds)-1
max_neighbors = int(np.min([cv_size, 99]))
return max_neighbors
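# Worked example (illustrative): with a sample of 500 subjects and 5-fold CV,
# cv_size = 500 * (1 - 1/5) - 1 = 399, so max_neighbors = min(399, 99) = 99;
# with only 100 subjects, cv_size = 79 and max_neighbors = 79.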
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-c", dest="configfile", help="Name of config file")
parser.add_option("-o", dest="offline", default=False, action='store_true',
help="Run in offline mode; e.g. on existing SWAP output.")
parser.add_option("-v", "--verbose", action='store_true', dest="verbose")
parser.add_option("-q", "--quiet", action="store_false", dest="verbose")
(options, args) = parser.parse_args()
MachineClassifier(options, args)
#pdb.set_trace()
"""
ID = str(sub['asset_id'])
try:
#if prob >= threshold: status = 'detected'
#else: status = 'rejected'
#sample.member[ID].retiredby = 'machine'
#sample.member[ID].state = 'inactive'
#sample.member[ID].status = status
except:
noSWAP += 1
# We can't do this with the current pickles...
# Initialize the subject in SWAP Collection
ID = sub['asset_id']
try:
test = sample.member[ID]
except:
sample.member[ID] = swap.Subject(ID, str(sub['SDSS_id']),
category='test',
kind='test',
flavor='test',
truth='UNKNOWN',
thresholds=swap_thresholds,
location=sub['external_ref'],
prior=prior)
# THIS NEEDS TO FUCKING CHANGE. =(
if p >= threshold:
result = 'FEAT'
status = 'detected'
else:
result = 'NOT'
status = 'rejected'
sample.member[ID].was_described(by=MLbureau.member[Name],
as_being=result,
at_time=time,
while_ignoring=0,
haste=True)
# Try to jerry-rig something here....
if p >= threshold: status = 'detected'
else: status = 'rejected'
try:
sample.member[ID].retiredby = 'machine'
sample.member[ID].state = 'inactive'
sample.member[ID].status = status
else:
print "MC: subject {0} not found in collection. Bummer".format(ID)
"""
"""
labels, counts = np.unique(train_labels, return_counts=True)
majority = np.max(counts)
for label, count in zip(labels, counts):
if majority == count:
major_idx = np.where(train_labels == label)[0]
major_idx = major_idx[:np.sum(train_labels==1-label)]
minor_idx = np.where(train_labels == 1-label)[0]
train_features = np.concatenate([train_features[major_idx],
train_features[minor_idx]])
train_meta = np.concatenate([train_meta[major_idx],
train_meta[minor_idx]])
train_labels = np.concatenate([train_labels[major_idx],
train_labels[minor_idx]])
"""
|
melaniebeck/GZExpress
|
analysis/MachineClassifier.py
|
Python
|
mit
| 22,084
|
[
"Galaxy"
] |
3f4c8ca4a11d1754f89db7fd3403b70c15e75c022d97ea5b167c3d07e718a28b
|
#
# Copyright (C) 2018 Susan H. Leung
# All Rights Reserved
#
from rdkit import RDConfig
import os
import sys
import math
from datetime import datetime, timedelta
import unittest
from rdkit import DataStructs
from rdkit import Chem
from rdkit.Geometry import rdGeometry as geom
from rdkit.Chem.rdchem import Atom
from rdkit.Chem.MolStandardize import rdMolStandardize
class TestCase(unittest.TestCase):
def setUp(self):
pass
def test1Cleanup(self):
mol = Chem.MolFromSmiles("CCC(=O)O[Na]")
nmol = rdMolStandardize.Cleanup(mol)
self.assertEqual(Chem.MolToSmiles(nmol), "CCC(=O)[O-].[Na+]")
def test2StandardizeSmiles(self):
self.assertEqual(rdMolStandardize.StandardizeSmiles("CCC(=O)O[Na]"), "CCC(=O)[O-].[Na+]")
def test3Parents(self):
mol = Chem.MolFromSmiles("[Na]OC(=O)c1ccccc1")
nmol = rdMolStandardize.FragmentParent(mol)
self.assertEqual(Chem.MolToSmiles(nmol), "O=C([O-])c1ccccc1")
mol = Chem.MolFromSmiles("C[NH+](C)(C).[Cl-]")
nmol = rdMolStandardize.ChargeParent(mol)
self.assertEqual(Chem.MolToSmiles(nmol), "CN(C)C")
mol = Chem.MolFromSmiles("[O-]CCCC=CO.[Na+]")
nmol = rdMolStandardize.TautomerParent(mol)
self.assertEqual(Chem.MolToSmiles(nmol), "O=CCCCC[O-].[Na+]")
nmol = rdMolStandardize.TautomerParent(mol, skipStandardize=True)
# same answer because of the standardization at the end
self.assertEqual(Chem.MolToSmiles(nmol), "O=CCCCC[O-].[Na+]")
mol = Chem.MolFromSmiles("C[C@](F)(Cl)C/C=C/[C@H](F)Cl")
nmol = rdMolStandardize.StereoParent(mol)
self.assertEqual(Chem.MolToSmiles(nmol), "CC(F)(Cl)CC=CC(F)Cl")
mol = Chem.MolFromSmiles("[12CH3][13CH3]")
nmol = rdMolStandardize.IsotopeParent(mol)
self.assertEqual(Chem.MolToSmiles(nmol), "CC")
mol = Chem.MolFromSmiles("[Na]Oc1c([12C@H](F)Cl)c(O[2H])c(C(=O)O)cc1CC=CO")
nmol = rdMolStandardize.SuperParent(mol)
self.assertEqual(Chem.MolToSmiles(nmol), "O=CCCc1cc(C(=O)O)c(O)c(C(F)Cl)c1O")
mol = Chem.MolFromSmiles("[Na]Oc1c([12C@H](F)Cl)c(O[2H])c(C(=O)O)cc1CC=CO")
nmol = rdMolStandardize.SuperParent(mol, skipStandardize=True)
self.assertEqual(Chem.MolToSmiles(nmol), "O=CCCc1cc(C(=O)[O-])c(O)c(C(F)Cl)c1O.[Na+]")
def test4Normalize(self):
mol = Chem.MolFromSmiles("C[N+](C)=C\C=C\[O-]")
nmol = rdMolStandardize.Normalize(mol)
self.assertEqual(Chem.MolToSmiles(nmol), "CN(C)C=CC=O")
def test4Reionize(self):
mol = Chem.MolFromSmiles("C1=C(C=CC(=C1)[S]([O-])=O)[S](O)(=O)=O")
nmol = rdMolStandardize.Reionize(mol)
self.assertEqual(Chem.MolToSmiles(nmol), "O=S(O)c1ccc(S(=O)(=O)[O-])cc1")
def test5Metal(self):
mol = Chem.MolFromSmiles("C1(CCCCC1)[Zn]Br")
md = rdMolStandardize.MetalDisconnector()
nm = md.Disconnect(mol)
# Metal.MetalDisconnector.Disconnect(mol)
self.assertEqual(Chem.MolToSmiles(nm), "[Br-].[CH-]1CCCCC1.[Zn+2]")
# test user defined metal_nof
md.SetMetalNof(
Chem.MolFromSmarts(
"[Li,K,Rb,Cs,Fr,Be,Mg,Ca,Sr,Ba,Ra,Sc,Ti,V,Cr,Mn,Fe,Co,Ni,Cu,Zn,Al,Ga,Y,Zr,Nb,Mo,Tc,Ru,Rh,Pd,Ag,Cd,In,Sn,Hf,Ta,W,Re,Os,Ir,Pt,Au,Hg,Tl,Pb,Bi]~[N,O,F]"
))
mol2 = Chem.MolFromSmiles("CCC(=O)O[Na]")
nm2 = md.Disconnect(mol2)
self.assertEqual(Chem.MolToSmiles(nm2), "CCC(=O)O[Na]")
def test6Charge(self):
mol = Chem.MolFromSmiles("C1=C(C=CC(=C1)[S]([O-])=O)[S](O)(=O)=O")
# instantiate with default acid base pair library
reionizer = rdMolStandardize.Reionizer()
nm = reionizer.reionize(mol)
self.assertEqual(Chem.MolToSmiles(nm), "O=S(O)c1ccc(S(=O)(=O)[O-])cc1")
# try reionize with another acid base pair library without the right
# pairs
abfile = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'MolStandardize', 'test_data',
'acid_base_pairs2.txt')
reionizer2 = rdMolStandardize.Reionizer(abfile)
nm2 = reionizer2.reionize(mol)
self.assertEqual(Chem.MolToSmiles(nm2), "O=S([O-])c1ccc(S(=O)(=O)O)cc1")
# test Uncharger
uncharger = rdMolStandardize.Uncharger()
mol3 = Chem.MolFromSmiles("O=C([O-])c1ccccc1")
nm3 = uncharger.uncharge(mol3)
self.assertEqual(Chem.MolToSmiles(nm3), "O=C(O)c1ccccc1")
# test canonical Uncharger
uncharger = rdMolStandardize.Uncharger(canonicalOrder=False)
mol3 = Chem.MolFromSmiles("C[N+](C)(C)CC(C(=O)[O-])CC(=O)[O-]")
nm3 = uncharger.uncharge(mol3)
self.assertEqual(Chem.MolToSmiles(nm3), "C[N+](C)(C)CC(CC(=O)[O-])C(=O)O")
uncharger = rdMolStandardize.Uncharger(canonicalOrder=True)
nm3 = uncharger.uncharge(mol3)
self.assertEqual(Chem.MolToSmiles(nm3), "C[N+](C)(C)CC(CC(=O)O)C(=O)[O-]")
def test7Fragment(self):
fragremover = rdMolStandardize.FragmentRemover()
mol = Chem.MolFromSmiles("CN(C)C.Cl.Cl.Br")
nm = fragremover.remove(mol)
self.assertEqual(Chem.MolToSmiles(nm), "CN(C)C")
lfragchooser = rdMolStandardize.LargestFragmentChooser()
mol2 = Chem.MolFromSmiles("[N+](=O)([O-])[O-].[CH3+]")
nm2 = lfragchooser.choose(mol2)
self.assertEqual(Chem.MolToSmiles(nm2), "O=[N+]([O-])[O-]")
lfragchooser2 = rdMolStandardize.LargestFragmentChooser(preferOrganic=True)
nm3 = lfragchooser2.choose(mol2)
self.assertEqual(Chem.MolToSmiles(nm3), "[CH3+]")
fragremover = rdMolStandardize.FragmentRemover(skip_if_all_match=True)
mol = Chem.MolFromSmiles("[Na+].Cl.Cl.Br")
nm = fragremover.remove(mol)
self.assertEqual(nm.GetNumAtoms(), mol.GetNumAtoms())
smi3 = "CNC[C@@H]([C@H]([C@@H]([C@@H](CO)O)O)O)O.c1cc2c(cc1C(=O)O)oc(n2)c3cc(cc(c3)Cl)Cl"
lfParams = rdMolStandardize.CleanupParameters()
lfrag_params = rdMolStandardize.LargestFragmentChooser(lfParams)
mol3 = Chem.MolFromSmiles(smi3)
lfrag3 = lfrag_params.choose(mol3)
self.assertEqual(Chem.MolToSmiles(lfrag3), "CNC[C@H](O)[C@@H](O)[C@H](O)[C@H](O)CO")
lfParams = rdMolStandardize.CleanupParameters()
lfParams.largestFragmentChooserCountHeavyAtomsOnly = True
lfrag_params = rdMolStandardize.LargestFragmentChooser(lfParams)
mol3 = Chem.MolFromSmiles(smi3)
lfrag3 = lfrag_params.choose(mol3)
self.assertEqual(Chem.MolToSmiles(lfrag3), "O=C(O)c1ccc2nc(-c3cc(Cl)cc(Cl)c3)oc2c1")
lfParams = rdMolStandardize.CleanupParameters()
lfParams.largestFragmentChooserUseAtomCount = False
lfrag_params = rdMolStandardize.LargestFragmentChooser(lfParams)
mol3 = Chem.MolFromSmiles(smi3)
lfrag3 = lfrag_params.choose(mol3)
self.assertEqual(Chem.MolToSmiles(lfrag3), "O=C(O)c1ccc2nc(-c3cc(Cl)cc(Cl)c3)oc2c1")
smi4 = "CC.O=[Pb]=O"
lfParams = rdMolStandardize.CleanupParameters()
lfrag_params = rdMolStandardize.LargestFragmentChooser(lfParams)
mol4 = Chem.MolFromSmiles(smi4)
lfrag4 = lfrag_params.choose(mol4)
self.assertEqual(Chem.MolToSmiles(lfrag4), "CC")
lfParams = rdMolStandardize.CleanupParameters()
lfParams.largestFragmentChooserCountHeavyAtomsOnly = True
lfrag_params = rdMolStandardize.LargestFragmentChooser(lfParams)
mol4 = Chem.MolFromSmiles(smi4)
lfrag4 = lfrag_params.choose(mol4)
self.assertEqual(Chem.MolToSmiles(lfrag4), "O=[Pb]=O")
lfParams = rdMolStandardize.CleanupParameters()
lfParams.largestFragmentChooserUseAtomCount = False
lfrag_params = rdMolStandardize.LargestFragmentChooser(lfParams)
mol4 = Chem.MolFromSmiles(smi4)
lfrag4 = lfrag_params.choose(mol4)
self.assertEqual(Chem.MolToSmiles(lfrag4), "O=[Pb]=O")
lfParams = rdMolStandardize.CleanupParameters()
lfParams.largestFragmentChooserCountHeavyAtomsOnly = True
lfParams.preferOrganic = True
lfrag_params = rdMolStandardize.LargestFragmentChooser(lfParams)
mol4 = Chem.MolFromSmiles(smi4)
lfrag4 = lfrag_params.choose(mol4)
self.assertEqual(Chem.MolToSmiles(lfrag4), "CC")
lfParams = rdMolStandardize.CleanupParameters()
lfParams.largestFragmentChooserUseAtomCount = False
lfParams.preferOrganic = True
lfrag_params = rdMolStandardize.LargestFragmentChooser(lfParams)
mol4 = Chem.MolFromSmiles(smi4)
lfrag4 = lfrag_params.choose(mol4)
self.assertEqual(Chem.MolToSmiles(lfrag4), "CC")
def test8Normalize(self):
normalizer = rdMolStandardize.Normalizer()
mol = Chem.MolFromSmiles("C[n+]1ccccc1[O-]")
nm = normalizer.normalize(mol)
self.assertEqual(Chem.MolToSmiles(nm), "Cn1ccccc1=O")
def test9Validate(self):
vm = rdMolStandardize.RDKitValidation()
mol = Chem.MolFromSmiles("CO(C)C", sanitize=False)
msg = vm.validate(mol)
self.assertEqual(len(msg), 1)
self.assertEqual(
    """INFO: [ValenceValidation] Explicit valence for atom # 1 O, 3, is greater than permitted""",
    msg[0])
vm2 = rdMolStandardize.MolVSValidation([rdMolStandardize.FragmentValidation()])
# with no argument it also works
# vm2 = rdMolStandardize.MolVSValidation()
mol2 = Chem.MolFromSmiles("COc1cccc(C=N[N-]C(N)=O)c1[O-].O.O.O.O=[U+2]=O")
msg2 = vm2.validate(mol2)
self.assertEqual(len(msg2), 1)
self.assertEqual("""INFO: [FragmentValidation] water/hydroxide is present""", msg2[0])
vm3 = rdMolStandardize.MolVSValidation()
mol3 = Chem.MolFromSmiles("C1COCCO1.O=C(NO)NO")
msg3 = vm3.validate(mol3)
self.assertEqual(len(msg3), 2)
self.assertEqual("""INFO: [FragmentValidation] 1,2-dimethoxyethane is present""", msg3[0])
self.assertEqual("""INFO: [FragmentValidation] 1,4-dioxane is present""", msg3[1])
atomic_no = [6, 7, 8]
allowed_atoms = [Atom(i) for i in atomic_no]
vm4 = rdMolStandardize.AllowedAtomsValidation(allowed_atoms)
mol4 = Chem.MolFromSmiles("CC(=O)CF")
msg4 = vm4.validate(mol4)
self.assertEqual(len(msg4), 1)
self.assertEqual("""INFO: [AllowedAtomsValidation] Atom F is not in allowedAtoms list""", msg4[0])
atomic_no = [9, 17, 35]
disallowed_atoms = [Atom(i) for i in atomic_no]
vm5 = rdMolStandardize.DisallowedAtomsValidation(disallowed_atoms)
mol5 = Chem.MolFromSmiles("CC(=O)CF")
msg5 = vm5.validate(mol5)
self.assertEqual(len(msg5), 1)
self.assertEqual("""INFO: [DisallowedAtomsValidation] Atom F is in disallowedAtoms list""", msg5[0])
msg6 = rdMolStandardize.ValidateSmiles("ClCCCl.c1ccccc1O")
self.assertEqual(len(msg6), 1)
self.assertEqual("""INFO: [FragmentValidation] 1,2-dichloroethane is present""", msg6[0])
def test10NormalizeFromData(self):
data = """// Name SMIRKS
Nitro to N+(O-)=O [N,P,As,Sb;X3:1](=[O,S,Se,Te:2])=[O,S,Se,Te:3]>>[*+1:1]([*-1:2])=[*:3]
Sulfone to S(=O)(=O) [S+2:1]([O-:2])([O-:3])>>[S+0:1](=[O-0:2])(=[O-0:3])
Pyridine oxide to n+O- [n:1]=[O:2]>>[n+:1][O-:2]
// Azide to N=N+=N- [*,H:1][N:2]=[N:3]#[N:4]>>[*,H:1][N:2]=[N+:3]=[N-:4]
"""
normalizer1 = rdMolStandardize.Normalizer()
params = rdMolStandardize.CleanupParameters()
normalizer2 = rdMolStandardize.NormalizerFromData(data, params)
imol = Chem.MolFromSmiles("O=N(=O)CCN=N#N", sanitize=False)
mol1 = normalizer1.normalize(imol)
mol2 = normalizer2.normalize(imol)
self.assertEqual(Chem.MolToSmiles(imol), "N#N=NCCN(=O)=O")
self.assertEqual(Chem.MolToSmiles(mol1), "[N-]=[N+]=NCC[N+](=O)[O-]")
self.assertEqual(Chem.MolToSmiles(mol2), "N#N=NCC[N+](=O)[O-]")
def test11FragmentParams(self):
data = """// Name SMARTS
fluorine [F]
chlorine [Cl]
"""
fragremover = rdMolStandardize.FragmentRemoverFromData(data)
mol = Chem.MolFromSmiles("CN(C)C.Cl.Cl.Br")
nm = fragremover.remove(mol)
self.assertEqual(Chem.MolToSmiles(nm), "Br.CN(C)C")
def test12ChargeParams(self):
params = """// The default list of AcidBasePairs, sorted from strongest to weakest.
// This list is derived from the Food and Drug: Administration Substance
// Registration System Standard Operating Procedure guide.
//
// Name Acid Base
-SO2H [!O][SD3](=O)[OH] [!O][SD3](=O)[O-]
-SO3H [!O]S(=O)(=O)[OH] [!O]S(=O)(=O)[O-]
"""
mol = Chem.MolFromSmiles("C1=C(C=CC(=C1)[S]([O-])=O)[S](O)(=O)=O")
# instantiate with default acid base pair library
reionizer = rdMolStandardize.ReionizerFromData(params, [])
print("done")
nm = reionizer.reionize(mol)
self.assertEqual(Chem.MolToSmiles(nm), "O=S([O-])c1ccc(S(=O)(=O)O)cc1")
def test13Tautomers(self):
enumerator = rdMolStandardize.TautomerEnumerator()
m = Chem.MolFromSmiles("C1(=CCCCC1)O")
ctaut = enumerator.Canonicalize(m)
self.assertEqual(Chem.MolToSmiles(ctaut), "O=C1CCCCC1")
params = rdMolStandardize.CleanupParameters()
enumerator = rdMolStandardize.TautomerEnumerator(params)
m = Chem.MolFromSmiles("C1(=CCCCC1)O")
ctaut = enumerator.Canonicalize(m)
self.assertEqual(Chem.MolToSmiles(ctaut), "O=C1CCCCC1")
taut_res = enumerator.Enumerate(m)
self.assertEqual(len(taut_res), 2)
ctauts = list(sorted(Chem.MolToSmiles(x) for x in taut_res))
self.assertEqual(ctauts, ['O=C1CCCCC1', 'OC1=CCCCC1'])
self.assertEqual(list(taut_res.smiles), ['O=C1CCCCC1', 'OC1=CCCCC1'])
# this tests the non-templated overload
self.assertEqual(Chem.MolToSmiles(enumerator.PickCanonical(taut_res)), "O=C1CCCCC1")
# this tests the templated overload
self.assertEqual(Chem.MolToSmiles(enumerator.PickCanonical(set(taut_res()))), "O=C1CCCCC1")
with self.assertRaises(TypeError):
enumerator.PickCanonical(1)
with self.assertRaises(TypeError):
enumerator.PickCanonical([0, 1])
self.assertEqual(
Chem.MolToSmiles(
enumerator.PickCanonical(Chem.MolFromSmiles(x) for x in ['O=C1CCCCC1', 'OC1=CCCCC1'])),
"O=C1CCCCC1")
def scorefunc1(mol):
' stupid tautomer scoring function '
p = Chem.MolFromSmarts('[OH]')
return len(mol.GetSubstructMatches(p))
def scorefunc2(mol):
' stupid tautomer scoring function '
p = Chem.MolFromSmarts('O=C')
return len(mol.GetSubstructMatches(p))
m = Chem.MolFromSmiles("C1(=CCCCC1)O")
ctaut = enumerator.Canonicalize(m, scorefunc1)
self.assertEqual(Chem.MolToSmiles(ctaut), "OC1=CCCCC1")
ctaut = enumerator.Canonicalize(m, scorefunc2)
self.assertEqual(Chem.MolToSmiles(ctaut), "O=C1CCCCC1")
# make sure lambdas work
ctaut = enumerator.Canonicalize(m,
lambda x: len(x.GetSubstructMatches(Chem.MolFromSmarts('C=O'))))
self.assertEqual(Chem.MolToSmiles(ctaut), "O=C1CCCCC1")
# make sure we behave if we return something bogus from the scoring function
with self.assertRaises(TypeError):
ctaut = enumerator.Canonicalize(m, lambda x: 'fail')
self.assertEqual(enumerator.ScoreTautomer(Chem.MolFromSmiles('N=c1[nH]cccc1')), 99)
self.assertEqual(enumerator.ScoreTautomer(Chem.MolFromSmiles('Nc1ncccc1')), 100)
res = enumerator.Enumerate(m)
# this test the specialized overload
ctaut = enumerator.PickCanonical(res, scorefunc1)
self.assertEqual(Chem.MolToSmiles(ctaut), "OC1=CCCCC1")
ctaut = enumerator.PickCanonical(res, scorefunc2)
self.assertEqual(Chem.MolToSmiles(ctaut), "O=C1CCCCC1")
# make sure lambdas work
ctaut = enumerator.PickCanonical(
res, lambda x: len(x.GetSubstructMatches(Chem.MolFromSmarts('C=O'))))
self.assertEqual(Chem.MolToSmiles(ctaut), "O=C1CCCCC1")
# make sure we behave if we return something bogus from the scoring function
with self.assertRaises(TypeError):
ctaut = enumerator.PickCanonical(res, lambda x: 'fail')
# this test the non-specialized overload
ctaut = enumerator.PickCanonical(set(res()), scorefunc1)
self.assertEqual(Chem.MolToSmiles(ctaut), "OC1=CCCCC1")
ctaut = enumerator.PickCanonical(set(res()), scorefunc2)
self.assertEqual(Chem.MolToSmiles(ctaut), "O=C1CCCCC1")
# make sure lambdas work
ctaut = enumerator.PickCanonical(
set(res()), lambda x: len(x.GetSubstructMatches(Chem.MolFromSmarts('C=O'))))
self.assertEqual(Chem.MolToSmiles(ctaut), "O=C1CCCCC1")
# make sure we behave if we return something bogus from the scoring function
with self.assertRaises(TypeError):
ctaut = enumerator.PickCanonical(set(res()), lambda x: 'fail')
def test14TautomerDetails(self):
enumerator = rdMolStandardize.TautomerEnumerator()
m = Chem.MolFromSmiles("c1ccccc1CN=c1[nH]cccc1")
taut_res = enumerator.Enumerate(m)
self.assertEqual(len(taut_res.tautomers), 2)
self.assertEqual(taut_res.modifiedAtoms, (7, 9))
self.assertEqual(len(taut_res.modifiedBonds), 7)
self.assertEqual(taut_res.modifiedBonds, (7, 8, 9, 10, 11, 12, 14))
taut_res = enumerator.Enumerate(m)
self.assertEqual(len(taut_res.tautomers), 2)
self.assertEqual(taut_res.modifiedAtoms, (7, 9))
taut_res = enumerator.Enumerate(m)
self.assertEqual(len(taut_res.tautomers), 2)
self.assertEqual(len(taut_res.modifiedBonds), 7)
self.assertEqual(taut_res.modifiedBonds, (7, 8, 9, 10, 11, 12, 14))
def test15EnumeratorParams(self):
# Test a structure with hundreds of tautomers.
smi68 = "[H][C](CO)(NC(=O)C1=C(O)C(O)=CC=C1)C(O)=O"
m68 = Chem.MolFromSmiles(smi68)
enumerator = rdMolStandardize.TautomerEnumerator()
res68 = enumerator.Enumerate(m68)
self.assertEqual(len(res68), 252)
self.assertEqual(len(res68.tautomers), len(res68))
self.assertEqual(res68.status, rdMolStandardize.TautomerEnumeratorStatus.MaxTransformsReached)
enumerator = rdMolStandardize.GetV1TautomerEnumerator()
res68 = enumerator.Enumerate(m68)
self.assertEqual(len(res68), 292)
self.assertEqual(len(res68.tautomers), len(res68))
self.assertEqual(res68.status, rdMolStandardize.TautomerEnumeratorStatus.MaxTransformsReached)
params = rdMolStandardize.CleanupParameters()
params.maxTautomers = 50
enumerator = rdMolStandardize.TautomerEnumerator(params)
res68 = enumerator.Enumerate(m68)
self.assertEqual(len(res68), 50)
self.assertEqual(res68.status, rdMolStandardize.TautomerEnumeratorStatus.MaxTautomersReached)
sAlaSmi = "C[C@H](N)C(=O)O"
sAla = Chem.MolFromSmiles(sAlaSmi)
# test remove (S)-Ala stereochemistry
self.assertEqual(sAla.GetAtomWithIdx(1).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CCW)
self.assertEqual(sAla.GetAtomWithIdx(1).GetProp("_CIPCode"), "S")
params = rdMolStandardize.CleanupParameters()
params.tautomerRemoveSp3Stereo = True
enumerator = rdMolStandardize.TautomerEnumerator(params)
res = enumerator.Enumerate(sAla)
for taut in res:
self.assertEqual(taut.GetAtomWithIdx(1).GetChiralTag(), Chem.ChiralType.CHI_UNSPECIFIED)
self.assertFalse(taut.GetAtomWithIdx(1).HasProp("_CIPCode"))
for taut in res.tautomers:
self.assertEqual(taut.GetAtomWithIdx(1).GetChiralTag(), Chem.ChiralType.CHI_UNSPECIFIED)
self.assertFalse(taut.GetAtomWithIdx(1).HasProp("_CIPCode"))
for i, taut in enumerate(res):
self.assertEqual(Chem.MolToSmiles(taut), Chem.MolToSmiles(res.tautomers[i]))
self.assertEqual(len(res), len(res.smiles))
self.assertEqual(len(res), len(res.tautomers))
self.assertEqual(len(res), len(res()))
self.assertEqual(len(res), len(res.smilesTautomerMap))
for i, taut in enumerate(res.tautomers):
self.assertEqual(Chem.MolToSmiles(taut), Chem.MolToSmiles(res[i]))
self.assertEqual(Chem.MolToSmiles(taut), res.smiles[i])
self.assertEqual(Chem.MolToSmiles(taut),
Chem.MolToSmiles(res.smilesTautomerMap.values()[i].tautomer))
for i, k in enumerate(res.smilesTautomerMap.keys()):
self.assertEqual(k, res.smiles[i])
for i, v in enumerate(res.smilesTautomerMap.values()):
self.assertEqual(Chem.MolToSmiles(v.tautomer), Chem.MolToSmiles(res[i]))
for i, (k, v) in enumerate(res.smilesTautomerMap.items()):
self.assertEqual(k, res.smiles[i])
self.assertEqual(Chem.MolToSmiles(v.tautomer), Chem.MolToSmiles(res[i]))
for i, smiles in enumerate(res.smiles):
self.assertEqual(smiles, Chem.MolToSmiles(res[i]))
self.assertEqual(smiles, res.smilesTautomerMap.keys()[i])
self.assertEqual(Chem.MolToSmiles(res.tautomers[-1]), Chem.MolToSmiles(res[-1]))
self.assertEqual(Chem.MolToSmiles(res[-1]), Chem.MolToSmiles(res[len(res) - 1]))
self.assertEqual(Chem.MolToSmiles(res.tautomers[-1]),
Chem.MolToSmiles(res.tautomers[len(res) - 1]))
with self.assertRaises(IndexError):
res[len(res)]
with self.assertRaises(IndexError):
res[-len(res) - 1]
with self.assertRaises(IndexError):
res.tautomers[len(res)]
with self.assertRaises(IndexError):
res.tautomers[-len(res.tautomers) - 1]
# test retain (S)-Ala stereochemistry
self.assertEqual(sAla.GetAtomWithIdx(1).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CCW)
self.assertEqual(sAla.GetAtomWithIdx(1).GetProp("_CIPCode"), "S")
params = rdMolStandardize.CleanupParameters()
params.tautomerRemoveSp3Stereo = False
enumerator = rdMolStandardize.TautomerEnumerator(params)
res = enumerator.Enumerate(sAla)
for taut in res:
tautAtom = taut.GetAtomWithIdx(1)
if (tautAtom.GetHybridization() == Chem.HybridizationType.SP3):
self.assertEqual(tautAtom.GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CCW)
self.assertTrue(tautAtom.HasProp("_CIPCode"))
self.assertEqual(tautAtom.GetProp("_CIPCode"), "S")
else:
self.assertFalse(tautAtom.HasProp("_CIPCode"))
self.assertEqual(tautAtom.GetChiralTag(), Chem.ChiralType.CHI_UNSPECIFIED)
eEnolSmi = "C/C=C/O"
eEnol = Chem.MolFromSmiles(eEnolSmi)
self.assertEqual(eEnol.GetBondWithIdx(1).GetStereo(), Chem.BondStereo.STEREOE)
# test remove enol E stereochemistry
params = rdMolStandardize.CleanupParameters()
params.tautomerRemoveBondStereo = True
enumerator = rdMolStandardize.TautomerEnumerator(params)
res = enumerator.Enumerate(eEnol)
for taut in res.tautomers:
self.assertEqual(taut.GetBondWithIdx(1).GetStereo(), Chem.BondStereo.STEREONONE)
# test retain enol E stereochemistry
params = rdMolStandardize.CleanupParameters()
params.tautomerRemoveBondStereo = False
enumerator = rdMolStandardize.TautomerEnumerator(params)
res = enumerator.Enumerate(eEnol)
for taut in res.tautomers:
if (taut.GetBondWithIdx(1).GetBondType() == Chem.BondType.DOUBLE):
self.assertEqual(taut.GetBondWithIdx(1).GetStereo(), Chem.BondStereo.STEREOE)
zEnolSmi = "C/C=C\\O"
zEnol = Chem.MolFromSmiles(zEnolSmi)
self.assertEqual(zEnol.GetBondWithIdx(1).GetStereo(), Chem.BondStereo.STEREOZ)
# test remove enol Z stereochemistry
params = rdMolStandardize.CleanupParameters()
params.tautomerRemoveBondStereo = True
enumerator = rdMolStandardize.TautomerEnumerator(params)
res = enumerator.Enumerate(zEnol)
for taut in res:
self.assertEqual(taut.GetBondWithIdx(1).GetStereo(), Chem.BondStereo.STEREONONE)
# test retain enol Z stereochemistry
params = rdMolStandardize.CleanupParameters()
params.tautomerRemoveBondStereo = False
enumerator = rdMolStandardize.TautomerEnumerator(params)
res = enumerator.Enumerate(zEnol)
for taut in res:
if (taut.GetBondWithIdx(1).GetBondType() == Chem.BondType.DOUBLE):
self.assertEqual(taut.GetBondWithIdx(1).GetStereo(), Chem.BondStereo.STEREOZ)
chembl2024142Smi = "[2H]C1=C(C(=C2C(=C1[2H])C(=O)C(=C(C2=O)C([2H])([2H])[2H])C/C=C(\\C)/CC([2H])([2H])/C=C(/CC/C=C(\\C)/CCC=C(C)C)\\C([2H])([2H])[2H])[2H])[2H]"
chembl2024142 = Chem.MolFromSmiles(chembl2024142Smi)
params = Chem.RemoveHsParameters()
params.removeAndTrackIsotopes = True
chembl2024142 = Chem.RemoveHs(chembl2024142, params)
self.assertTrue(chembl2024142.GetAtomWithIdx(12).HasProp("_isotopicHs"))
# test remove isotopic Hs involved in tautomerism
params = rdMolStandardize.CleanupParameters()
params.tautomerRemoveIsotopicHs = True
enumerator = rdMolStandardize.TautomerEnumerator(params)
res = enumerator.Enumerate(chembl2024142)
for taut in res:
self.assertFalse(taut.GetAtomWithIdx(12).HasProp("_isotopicHs"))
# test retain isotopic Hs involved in tautomerism
params = rdMolStandardize.CleanupParameters()
params.tautomerRemoveIsotopicHs = False
enumerator = rdMolStandardize.TautomerEnumerator(params)
res = enumerator.Enumerate(chembl2024142)
for taut in res:
self.assertTrue(taut.GetAtomWithIdx(12).HasProp("_isotopicHs"))
def test16EnumeratorCallback(self):
class MyTautomerEnumeratorCallback(rdMolStandardize.TautomerEnumeratorCallback):
def __init__(self, parent, timeout_ms):
super().__init__()
self._parent = parent
self._timeout = timedelta(milliseconds=timeout_ms)
self._start_time = datetime.now()
def __call__(self, mol, res):
self._parent.assertTrue(isinstance(mol, Chem.Mol))
self._parent.assertTrue(isinstance(res, rdMolStandardize.TautomerEnumeratorResult))
return (datetime.now() - self._start_time < self._timeout)
class MyBrokenCallback(rdMolStandardize.TautomerEnumeratorCallback):
pass
class MyBrokenCallback2(rdMolStandardize.TautomerEnumeratorCallback):
__call__ = 1
# Test a structure with hundreds of tautomers.
smi68 = "[H][C](CO)(NC(=O)C1=C(O)C(O)=CC=C1)C(O)=O"
m68 = Chem.MolFromSmiles(smi68)
params = rdMolStandardize.CleanupParameters()
params.maxTransforms = 10000
params.maxTautomers = 10000
enumerator = rdMolStandardize.TautomerEnumerator(params)
enumerator.SetCallback(MyTautomerEnumeratorCallback(self, 50.0))
res68 = enumerator.Enumerate(m68)
# either the enumeration was canceled due to timeout
# or it has completed very quickly
hasReachedTimeout = (len(res68.tautomers) < 375
and res68.status == rdMolStandardize.TautomerEnumeratorStatus.Canceled)
hasCompleted = (len(res68.tautomers) == 375
and res68.status == rdMolStandardize.TautomerEnumeratorStatus.Completed)
if hasReachedTimeout:
print("Enumeration was canceled due to timeout (50 ms)", file=sys.stderr)
if hasCompleted:
print("Enumeration has completed", file=sys.stderr)
self.assertTrue(hasReachedTimeout or hasCompleted)
self.assertTrue(hasReachedTimeout ^ hasCompleted)
enumerator = rdMolStandardize.TautomerEnumerator(params)
enumerator.SetCallback(MyTautomerEnumeratorCallback(self, 10000.0))
res68 = enumerator.Enumerate(m68)
# either the enumeration completed
# or it ran very slowly and was canceled due to timeout
hasReachedTimeout = (len(res68.tautomers) < 295
and res68.status == rdMolStandardize.TautomerEnumeratorStatus.Canceled)
hasCompleted = (len(res68.tautomers) == 295
and res68.status == rdMolStandardize.TautomerEnumeratorStatus.Completed)
if hasReachedTimeout:
print("Enumeration was canceled due to timeout (10 s)", file=sys.stderr)
if hasCompleted:
print("Enumeration has completed", file=sys.stderr)
self.assertTrue(hasReachedTimeout or hasCompleted)
self.assertTrue(hasReachedTimeout ^ hasCompleted)
enumerator = rdMolStandardize.TautomerEnumerator(params)
with self.assertRaises(AttributeError):
enumerator.SetCallback(MyBrokenCallback())
with self.assertRaises(AttributeError):
enumerator.SetCallback(MyBrokenCallback2())
# GitHub #4736
enumerator = rdMolStandardize.TautomerEnumerator(params)
enumerator.SetCallback(MyTautomerEnumeratorCallback(self, 50.0))
enumerator_copy = rdMolStandardize.TautomerEnumerator(enumerator)
res68 = enumerator.Enumerate(m68)
res68_copy = enumerator_copy.Enumerate(m68)
self.assertTrue(res68.status == res68_copy.status)
def test17PickCanonicalCIPChangeOnChiralCenter(self):
def get_canonical_taut(res):
best_idx = max([(rdMolStandardize.TautomerEnumerator.ScoreTautomer(t), i)
for i, t in enumerate(res.tautomers)])[1]
return res.tautomers[best_idx]
smi = "CC\\C=C(/O)[C@@H](C)C(C)=O"
mol = Chem.MolFromSmiles(smi)
self.assertIsNotNone(mol)
self.assertEqual(mol.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CW)
self.assertEqual(mol.GetAtomWithIdx(5).GetProp("_CIPCode"), "R")
# here the chirality disappears as the chiral center is itself involved in tautomerism
te = rdMolStandardize.TautomerEnumerator()
can_taut = te.Canonicalize(mol)
self.assertIsNotNone(can_taut)
self.assertEqual(can_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_UNSPECIFIED)
self.assertFalse(can_taut.GetAtomWithIdx(5).HasProp("_CIPCode"))
self.assertEqual(Chem.MolToSmiles(can_taut), "CCCC(=O)C(C)C(C)=O")
# here the chirality stays even if the chiral center is itself involved in tautomerism
# because of the tautomerRemoveSp3Stereo parameter being set to false
params = rdMolStandardize.CleanupParameters()
params.tautomerRemoveSp3Stereo = False
te = rdMolStandardize.TautomerEnumerator(params)
can_taut = te.Canonicalize(mol)
self.assertIsNotNone(can_taut)
self.assertEqual(can_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CW)
self.assertEqual(can_taut.GetAtomWithIdx(5).GetProp("_CIPCode"), "S")
self.assertEqual(Chem.MolToSmiles(can_taut), "CCCC(=O)[C@@H](C)C(C)=O")
# here the chirality disappears as the chiral center is itself involved in tautomerism
# the reassignStereo setting has no influence
te = rdMolStandardize.TautomerEnumerator()
res = te.Enumerate(mol)
self.assertEqual(res.status, rdMolStandardize.TautomerEnumeratorStatus.Completed)
self.assertEqual(len(res.tautomers), 8)
best_taut = get_canonical_taut(res)
self.assertIsNotNone(best_taut)
self.assertEqual(best_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_UNSPECIFIED)
self.assertFalse(best_taut.GetAtomWithIdx(5).HasProp("_CIPCode"))
self.assertEqual(Chem.MolToSmiles(best_taut), "CCCC(=O)C(C)C(C)=O")
# here the chirality disappears as the chiral center is itself involved in tautomerism
# the reassignStereo setting has no influence
params = rdMolStandardize.CleanupParameters()
params.tautomerReassignStereo = False
te = rdMolStandardize.TautomerEnumerator(params)
res = te.Enumerate(mol)
self.assertEqual(res.status, rdMolStandardize.TautomerEnumeratorStatus.Completed)
self.assertEqual(len(res.tautomers), 8)
best_taut = get_canonical_taut(res)
self.assertIsNotNone(best_taut)
self.assertEqual(best_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_UNSPECIFIED)
self.assertFalse(best_taut.GetAtomWithIdx(5).HasProp("_CIPCode"))
self.assertEqual(Chem.MolToSmiles(best_taut), "CCCC(=O)C(C)C(C)=O")
# here the chirality stays even if the chiral center is itself involved in tautomerism
# because of the tautomerRemoveSp3Stereo parameter being set to false
# as reassignStereo by default is true, the CIP code has been recomputed
# and therefore it is now S (correct)
params = rdMolStandardize.CleanupParameters()
params.tautomerRemoveSp3Stereo = False
te = rdMolStandardize.TautomerEnumerator(params)
res = te.Enumerate(mol)
self.assertEqual(res.status, rdMolStandardize.TautomerEnumeratorStatus.Completed)
self.assertEqual(len(res.tautomers), 8)
best_taut = get_canonical_taut(res)
self.assertIsNotNone(best_taut)
self.assertEqual(best_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CW)
self.assertEqual(best_taut.GetAtomWithIdx(5).GetProp("_CIPCode"), "S")
self.assertEqual(Chem.MolToSmiles(best_taut), "CCCC(=O)[C@@H](C)C(C)=O")
# here the chirality stays even if the chiral center is itself involved in tautomerism
# because of the tautomerRemoveSp3Stereo parameter being set to false
# as reassignStereo is false, the CIP code has not been recomputed
# and therefore it is still R (incorrect)
params = rdMolStandardize.CleanupParameters()
params.tautomerRemoveSp3Stereo = False
params.tautomerReassignStereo = False
te = rdMolStandardize.TautomerEnumerator(params)
res = te.Enumerate(mol)
self.assertEqual(res.status, rdMolStandardize.TautomerEnumeratorStatus.Completed)
self.assertEqual(len(res.tautomers), 8)
best_taut = get_canonical_taut(res)
self.assertIsNotNone(best_taut)
self.assertEqual(best_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CW)
self.assertEqual(best_taut.GetAtomWithIdx(5).GetProp("_CIPCode"), "R")
self.assertEqual(Chem.MolToSmiles(best_taut), "CCCC(=O)[C@@H](C)C(C)=O")
smi = "CC\\C=C(/O)[C@@](CC)(C)C(C)=O"
mol = Chem.MolFromSmiles(smi)
self.assertIsNotNone(mol)
self.assertEqual(mol.GetAtomWithIdx(5).GetProp("_CIPCode"), "S")
self.assertEqual(mol.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CW)
# here the chirality stays no matter how tautomerRemoveSp3Stereo
# is set as the chiral center is not involved in tautomerism
te = rdMolStandardize.TautomerEnumerator()
can_taut = te.Canonicalize(mol)
self.assertIsNotNone(can_taut)
self.assertEqual(can_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CW)
self.assertEqual(can_taut.GetAtomWithIdx(5).GetProp("_CIPCode"), "R")
self.assertEqual(Chem.MolToSmiles(can_taut), "CCCC(=O)[C@](C)(CC)C(C)=O")
params = rdMolStandardize.CleanupParameters()
params.tautomerRemoveSp3Stereo = False
te = rdMolStandardize.TautomerEnumerator(params)
can_taut = te.Canonicalize(mol)
self.assertIsNotNone(can_taut)
self.assertEqual(can_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CW)
self.assertEqual(can_taut.GetAtomWithIdx(5).GetProp("_CIPCode"), "R")
self.assertEqual(Chem.MolToSmiles(can_taut), "CCCC(=O)[C@](C)(CC)C(C)=O")
# as reassignStereo by default is true, the CIP code has been recomputed
# and therefore it is now R (correct)
te = rdMolStandardize.TautomerEnumerator()
res = te.Enumerate(mol)
self.assertEqual(res.status, rdMolStandardize.TautomerEnumeratorStatus.Completed)
self.assertEqual(len(res.tautomers), 4)
best_taut = get_canonical_taut(res)
self.assertIsNotNone(best_taut)
self.assertEqual(best_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CW)
self.assertEqual(best_taut.GetAtomWithIdx(5).GetProp("_CIPCode"), "R")
self.assertEqual(Chem.MolToSmiles(best_taut), "CCCC(=O)[C@](C)(CC)C(C)=O")
# as reassignStereo is false, the CIP code has not been recomputed
# and therefore it is still S (incorrect)
params = rdMolStandardize.CleanupParameters()
params.tautomerReassignStereo = False
te = rdMolStandardize.TautomerEnumerator(params)
res = te.Enumerate(mol)
self.assertEqual(res.status, rdMolStandardize.TautomerEnumeratorStatus.Completed)
self.assertEqual(len(res.tautomers), 4)
best_taut = get_canonical_taut(res)
self.assertIsNotNone(best_taut)
self.assertEqual(best_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CW)
self.assertEqual(best_taut.GetAtomWithIdx(5).GetProp("_CIPCode"), "S")
self.assertEqual(Chem.MolToSmiles(best_taut), "CCCC(=O)[C@](C)(CC)C(C)=O")
# as reassignStereo by default is true, the CIP code has been recomputed
# and therefore it is now R (correct)
params = rdMolStandardize.CleanupParameters()
params.tautomerRemoveSp3Stereo = False
te = rdMolStandardize.TautomerEnumerator(params)
res = te.Enumerate(mol)
self.assertEqual(res.status, rdMolStandardize.TautomerEnumeratorStatus.Completed)
self.assertEqual(len(res.tautomers), 4)
best_taut = get_canonical_taut(res)
self.assertIsNotNone(best_taut)
self.assertEqual(best_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CW)
self.assertEqual(best_taut.GetAtomWithIdx(5).GetProp("_CIPCode"), "R")
self.assertEqual(Chem.MolToSmiles(best_taut), "CCCC(=O)[C@](C)(CC)C(C)=O")
# here the chirality stays even if the tautomerRemoveSp3Stereo parameter
# is set to false as the chiral center is not involved in tautomerism
# as reassignStereo is false, the CIP code has not been recomputed
# and therefore it is still S (incorrect)
params = rdMolStandardize.CleanupParameters()
params.tautomerRemoveSp3Stereo = False
params.tautomerReassignStereo = False
te = rdMolStandardize.TautomerEnumerator(params)
res = te.Enumerate(mol)
self.assertEqual(res.status, rdMolStandardize.TautomerEnumeratorStatus.Completed)
self.assertEqual(len(res.tautomers), 4)
best_taut = get_canonical_taut(res)
self.assertIsNotNone(best_taut)
self.assertEqual(best_taut.GetAtomWithIdx(5).GetChiralTag(), Chem.ChiralType.CHI_TETRAHEDRAL_CW)
self.assertEqual(best_taut.GetAtomWithIdx(5).GetProp("_CIPCode"), "S")
self.assertEqual(Chem.MolToSmiles(best_taut), "CCCC(=O)[C@](C)(CC)C(C)=O")
def test18TautomerEnumeratorResultIter(self):
smi = "Cc1nnc(NC(=O)N2CCN(Cc3ccc(F)cc3)C(=O)C2)s1"
mol = Chem.MolFromSmiles(smi)
self.assertIsNotNone(mol)
te = rdMolStandardize.TautomerEnumerator()
res = te.Enumerate(mol)
res_it = iter(res)
i = 0
while 1:
try:
t = next(res_it)
except StopIteration:
break
self.assertEqual(Chem.MolToSmiles(t), Chem.MolToSmiles(res[i]))
i += 1
self.assertEqual(i, len(res))
res_it = iter(res)
i = -len(res)
while 1:
try:
t = next(res_it)
except StopIteration:
break
self.assertEqual(Chem.MolToSmiles(t), Chem.MolToSmiles(res[i]))
i += 1
self.assertEqual(i, 0)
def test19NormalizeFromParams(self):
params = rdMolStandardize.CleanupParameters()
params.normalizationsFile = "ThisFileDoesNotExist.txt"
with self.assertRaises(OSError):
rdMolStandardize.NormalizerFromParams(params)
def test20NoneHandling(self):
with self.assertRaises(ValueError):
rdMolStandardize.ChargeParent(None)
with self.assertRaises(ValueError):
rdMolStandardize.Cleanup(None)
with self.assertRaises(ValueError):
rdMolStandardize.FragmentParent(None)
with self.assertRaises(ValueError):
rdMolStandardize.Normalize(None)
with self.assertRaises(ValueError):
rdMolStandardize.Reionize(None)
def test21UpdateFromJSON(self):
params = rdMolStandardize.CleanupParameters()
# note: these actual parameters aren't useful... they are for testing
rdMolStandardize.UpdateParamsFromJSON(
params, """{
"normalizationData":[
{"name":"silly 1","smarts":"[Cl:1]>>[F:1]"},
{"name":"silly 2","smarts":"[Br:1]>>[F:1]"}
],
"acidbaseData":[
{"name":"-CO2H","acid":"C(=O)[OH]","base":"C(=O)[O-]"},
{"name":"phenol","acid":"c[OH]","base":"c[O-]"}
],
"fragmentData":[
{"name":"hydrogen", "smarts":"[H]"},
{"name":"fluorine", "smarts":"[F]"},
{"name":"chlorine", "smarts":"[Cl]"}
],
"tautomerTransformData":[
{"name":"1,3 (thio)keto/enol f","smarts":"[CX4!H0]-[C]=[O,S,Se,Te;X1]","bonds":"","charges":""},
{"name":"1,3 (thio)keto/enol r","smarts":"[O,S,Se,Te;X2!H0]-[C]=[C]"}
]}""")
m = Chem.MolFromSmiles("CCC=O")
te = rdMolStandardize.TautomerEnumerator(params)
tauts = [Chem.MolToSmiles(x) for x in te.Enumerate(m)]
self.assertEqual(tauts, ["CC=CO", "CCC=O"])
self.assertEqual(Chem.MolToSmiles(rdMolStandardize.CanonicalTautomer(m, params)), "CCC=O")
# now with defaults
te = rdMolStandardize.TautomerEnumerator()
tauts = [Chem.MolToSmiles(x) for x in te.Enumerate(m)]
self.assertEqual(tauts, ["CC=CO", "CCC=O"])
self.assertEqual(Chem.MolToSmiles(rdMolStandardize.CanonicalTautomer(m)), "CCC=O")
m = Chem.MolFromSmiles('ClCCCBr')
nm = rdMolStandardize.Normalize(m, params)
self.assertEqual(Chem.MolToSmiles(nm), "FCCCF")
# now with defaults
nm = rdMolStandardize.Normalize(m)
self.assertEqual(Chem.MolToSmiles(nm), "ClCCCBr")
m = Chem.MolFromSmiles('c1cc([O-])cc(C(=O)O)c1')
nm = rdMolStandardize.Reionize(m, params)
self.assertEqual(Chem.MolToSmiles(nm), "O=C([O-])c1cccc(O)c1")
# now with defaults
nm = rdMolStandardize.Reionize(m)
self.assertEqual(Chem.MolToSmiles(nm), "O=C([O-])c1cccc(O)c1")
m = Chem.MolFromSmiles('C1=C(C=CC(=C1)[S]([O-])=O)[S](O)(=O)=O')
nm = rdMolStandardize.Reionize(m, params)
self.assertEqual(Chem.MolToSmiles(nm), "O=S([O-])c1ccc(S(=O)(=O)O)cc1")
# now with defaults
nm = rdMolStandardize.Reionize(m)
self.assertEqual(Chem.MolToSmiles(nm), "O=S(O)c1ccc(S(=O)(=O)[O-])cc1")
m = Chem.MolFromSmiles('[F-].[Cl-].[Br-].CC')
nm = rdMolStandardize.RemoveFragments(m, params)
self.assertEqual(Chem.MolToSmiles(nm), "CC.[Br-]")
# now with defaults
nm = rdMolStandardize.RemoveFragments(m)
self.assertEqual(Chem.MolToSmiles(nm), "CC")
if __name__ == "__main__":
unittest.main()
|
bp-kelley/rdkit
|
Code/GraphMol/MolStandardize/Wrap/testMolStandardize.py
|
Python
|
bsd-3-clause
| 42,057
|
[
"RDKit"
] |
d1fddc5079ae6391b983b9f9096a050cfa36d802b10f86fdf6b6e8274878e1a5
|
#A* -------------------------------------------------------------------
#B* This file contains source code for the PyMOL computer program
#C* copyright 1998-2000 by Warren Lyford Delano of DeLano Scientific.
#D* -------------------------------------------------------------------
#E* It is unlawful to modify or remove this copyright notice.
#F* -------------------------------------------------------------------
#G* Please see the accompanying LICENSE file for further information.
#H* -------------------------------------------------------------------
#I* Additional authors of this source file include:
#-*
#-*
#-*
#Z* -------------------------------------------------------------------
# Generic vector and matrix routines for 3-Space
# Assembled for usage in PyMOL and Chemical Python
#
# Assumes row-major matrices and arrays
# [ [vector 1], [vector 2], [vector 3] ]
#
# Raises ValueError when given bad input
#
# TODO: documentation!
import math
import random
import copy
RSMALL4 = 0.0001
#------------------------------------------------------------------------------
def get_null():
return [0.0,0.0,0.0]
#------------------------------------------------------------------------------
def get_identity():
return [[1.0,0.0,0.0],[0.0,1.0,0.0],[0.0,0.0,1.0]]
#------------------------------------------------------------------------------
def distance_sq(v1, v2):
d0 = v2[0] - v1[0]
d1 = v2[1] - v1[1]
d2 = v2[2] - v1[2]
return (d0*d0) + (d1*d1) + (d2*d2)
#------------------------------------------------------------------------------
def distance(v1, v2):
d0 = v2[0] - v1[0]
d1 = v2[1] - v1[1]
d2 = v2[2] - v1[2]
return math.sqrt((d0*d0) + (d1*d1) + (d2*d2))
#------------------------------------------------------------------------------
def length(v):
return math.sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2])
#------------------------------------------------------------------------------
def random_displacement(v,radius):
r_vect = lambda r=random.random:[r()-0.5,r()-0.5,r()-0.5]
while 1:
vect = r_vect()
v_len = length(vect)
if v_len<=0.5:
break;
if v_len > 0.00000000001:
v_len = random.random()*radius / v_len
return add(v,scale([vect[0], vect[1], vect[2]],v_len))
else:
return v
#------------------------------------------------------------------------------
def random_sphere(v,radius):
r_vect = lambda r=random.random:[r()-0.5,r()-0.5,r()-0.5]
while 1:
vect = r_vect()
v_len = length(vect)
if (v_len<=0.5) and (v_len!=0.0):
break;
return add(v,scale([vect[0], vect[1], vect[2]],2*radius/v_len))
#------------------------------------------------------------------------------
def random_vector():
r_vect = lambda r=random.random:[r()-0.5,r()-0.5,r()-0.5]
while 1:
vect = r_vect()
if length(vect)<=0.5:
break;
return scale([vect[0], vect[1], vect[2]],2.0)
#------------------------------------------------------------------------------
def add(v1,v2):
return [v1[0]+v2[0],v1[1]+v2[1],v1[2]+v2[2]]
#------------------------------------------------------------------------------
def average(v1,v2):
return [(v1[0]+v2[0])/2.0,(v1[1]+v2[1])/2.0,(v1[2]+v2[2])/2.0]
#------------------------------------------------------------------------------
def scale(v,factor):
return [v[0]*factor,v[1]*factor,v[2]*factor]
#------------------------------------------------------------------------------
def negate(v):
return [-v[0],-v[1],-v[2]]
#------------------------------------------------------------------------------
def sub(v1,v2):
return [v1[0]-v2[0],v1[1]-v2[1],v1[2]-v2[2]]
#------------------------------------------------------------------------------
def dot_product(v1,v2):
return v1[0]*v2[0]+v1[1]*v2[1]+v1[2]*v2[2]
#------------------------------------------------------------------------------
def cross_product(v1,v2):
return [(v1[1]*v2[2]) - (v1[2]*v2[1]),
(v1[2]*v2[0]) - (v1[0]*v2[2]),
(v1[0]*v2[1]) - (v1[1]*v2[0])]
#------------------------------------------------------------------------------
def transform(m,v):
return [m[0][0]*v[0] + m[0][1]*v[1] + m[0][2]*v[2],
m[1][0]*v[0] + m[1][1]*v[1] + m[1][2]*v[2],
m[2][0]*v[0] + m[2][1]*v[1] + m[2][2]*v[2]]
#------------------------------------------------------------------------------
def inverse_transform(m,v):
return [m[0][0]*v[0] + m[1][0]*v[1] + m[2][0]*v[2],
m[0][1]*v[0] + m[1][1]*v[1] + m[2][1]*v[2],
m[0][2]*v[0] + m[1][2]*v[1] + m[2][2]*v[2]]
#------------------------------------------------------------------------------
def multiply(m1,m2): # HAVEN'T YET VERIFIED THAT THIS CONFORMS TO THE STANDARD DEFN.
return [[m1[0][0]*m2[0][0] + m1[0][1]*m2[1][0] + m1[0][2]*m2[2][0],
m1[1][0]*m2[0][0] + m1[1][1]*m2[1][0] + m1[1][2]*m2[2][0],
m1[2][0]*m2[0][0] + m1[2][1]*m2[1][0] + m1[2][2]*m2[2][0]],
[m1[0][0]*m2[0][1] + m1[0][1]*m2[1][1] + m1[0][2]*m2[2][1],
m1[1][0]*m2[0][1] + m1[1][1]*m2[1][1] + m1[1][2]*m2[2][1],
m1[2][0]*m2[0][1] + m1[2][1]*m2[1][1] + m1[2][2]*m2[2][1]],
[m1[0][0]*m2[0][2] + m1[0][1]*m2[1][2] + m1[0][2]*m2[2][2],
m1[1][0]*m2[0][2] + m1[1][1]*m2[1][2] + m1[1][2]*m2[2][2],
m1[2][0]*m2[0][2] + m1[2][1]*m2[1][2] + m1[2][2]*m2[2][2]]]
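# Added aside (my reading of the indices above; worth double-checking before
# relying on it): with the row-major convention documented at the top of this
# module, element [i][j] of the returned matrix is sum_k m1[j][k]*m2[k][i],
# i.e. the transpose of the conventional product m1.m2 -- presumably why the
# "haven't verified" note was left on this definition.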
#------------------------------------------------------------------------------
def transpose(m1):
return [[m1[0][0],
m1[1][0],
m1[2][0]],
[m1[0][1],
m1[1][1],
m1[2][1]],
[m1[0][2],
m1[1][2],
m1[2][2]]]
#------------------------------------------------------------------------------
def get_system2(x,y):
z = cross_product(x,y)
z = normalize(z)
y = cross_product(z,x);
y = normalize(y);
x = normalize(x);
return [x,y,z]
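# Illustrative check (added, not part of the original module): get_system2
# builds a right-handed orthonormal frame from x and a vector roughly along y.
#   >>> get_system2([1.0, 0.0, 0.0], [1.0, 1.0, 0.0])
#   [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]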
#------------------------------------------------------------------------------
def scale_system(s,factor):
r = []
for a in s:
r.append([a[0]*factor,a[1]*factor,a[2]*factor])
return r
#------------------------------------------------------------------------------
def transform_about_point(m,v,p):
return add(transform(m,sub(v,p)),p)
#------------------------------------------------------------------------------
def get_angle(v1,v2): # v1,v2 must be unit vectors
denom = (math.sqrt(((v1[0]*v1[0]) + (v1[1]*v1[1]) + (v1[2]*v1[2]))) *
math.sqrt(((v2[0]*v2[0]) + (v2[1]*v2[1]) + (v2[2]*v2[2]))))
if denom>1e-10:
result = ( (v1[0]*v2[0]) + (v1[1]*v2[1]) + (v1[2]*v2[2]) ) / denom
else:
result = 0.0
result = math.acos(result)
return result
#------------------------------------------------------------------------------
def get_angle_formed_by(p1,p2,p3): # angle formed by three positions in space
# based on code submitted by Paul Sherwood
r1 = distance(p1,p2)
r2 = distance(p2,p3)
r3 = distance(p1,p3)
small = 1.0e-10
if (r1 + r2 - r3) < small:
# This seems to happen occasionally for 180 angles
theta = math.pi
else:
theta = math.acos( (r1*r1 + r2*r2 - r3*r3) / (2.0 * r1*r2) )
return theta;
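# Illustrative check (added, not part of the original module): a right angle.
#   >>> get_angle_formed_by([1.0,0.0,0.0], [0.0,0.0,0.0], [0.0,1.0,0.0])
# r1 = r2 = 1 and r3 = sqrt(2), so theta = acos((1 + 1 - 2)/2) = acos(0) = pi/2.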
#------------------------------------------------------------------------------
def project(v,n):
dot = v[0]*n[0] + v[1]*n[1] + v[2]*n[2]
return [ dot * n[0], dot * n[1], dot * n[2] ]
#------------------------------------------------------------------------------
def remove_component(v, n):
dot = v[0]*n[0] + v[1]*n[1] + v[2]*n[2]
return [v[0] - dot * n[0], v[1] - dot * n[1], v[2] - dot * n[2]]
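# Added note: project() and remove_component() both implicitly assume that n
# is a unit vector; if it is not, the projected component comes out scaled by
# |n|^2, since the dot product is never divided by dot_product(n, n).  Pass n
# through normalize() first when in doubt.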
#------------------------------------------------------------------------------
def normalize(v):
vlen = math.sqrt((v[0]*v[0]) + (v[1]*v[1]) + (v[2]*v[2]))
if vlen>RSMALL4:
return [v[0]/vlen,v[1]/vlen,v[2]/vlen]
else:
return get_null()
#------------------------------------------------------------------------------
def reverse(v):
return [ -v[0], -v[1], -v[2] ]
#------------------------------------------------------------------------------
def normalize_failsafe(v):
vlen = math.sqrt((v[0]*v[0]) + (v[1]*v[1]) + (v[2]*v[2]))
if vlen>RSMALL4:
return [v[0]/vlen,v[1]/vlen,v[2]/vlen]
else:
return [1.0,0.0,0.0]
#------------------------------------------------------------------------------
def rotation_matrix(angle,axis):
x=axis[0]
y=axis[1]
z=axis[2]
s = math.sin(angle)
c = math.cos(angle)
mag = math.sqrt( x*x + y*y + z*z )
if abs(mag)<RSMALL4:
return get_identity()
x /= mag
y = y / mag
z = z / mag
xx = x * x
yy = y * y
zz = z * z
xy = x * y
yz = y * z
zx = z * x
xs = x * s
ys = y * s
zs = z * s
one_c = 1.0 - c
return [[ (one_c * xx) + c , (one_c * xy) - zs, (one_c * zx) + ys],
[ (one_c * xy) + zs, (one_c * yy) + c , (one_c * yz) - xs],
[ (one_c * zx) - ys, (one_c * yz) + xs, (one_c * zz) + c ]]
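# Illustrative check (added; uses the row-major transform() defined earlier in
# this module): rotating the x axis by +90 degrees about z yields the y axis.
#   >>> m = rotation_matrix(math.pi / 2.0, [0.0, 0.0, 1.0])
#   >>> transform(m, [1.0, 0.0, 0.0])
# -> [~0.0, 1.0, 0.0] up to floating-point noise in the first component.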
#------------------------------------------------------------------------------
def transform_array(rot_mtx,vec_array):
'''transform_array( matrix, vector_array ) -> vector_array
'''
return map( lambda x,m=rot_mtx:transform(m,x), vec_array )
#------------------------------------------------------------------------------
def translate_array(trans_vec,vec_array):
'''translate_array(trans_vec,vec_array) -> vec_array
Adds 'mult'*'trans_vec' to each element in vec_array, and returns
the translated vector.
'''
return map ( lambda x,m=trans_vec:add(m,x),vec_array )
#------------------------------------------------------------------------------
def fit_apply(fit_result,vec_array):
'''fit_apply(fir_result,vec_array) -> vec_array
Applies a fit result to an array of vectors
'''
return map( lambda x,t1=fit_result[0],mt2=negate(fit_result[1]),
m=fit_result[2]: add(t1,transform(m,add(mt2,x))),vec_array)
#------------------------------------------------------------------------------
def fit(target_array, source_array):
'''fit(target_array, source_array) -> (t1, t2, rot_mtx, rmsd) [fit_result]
Calculates the translation vectors and rotation matrix required
to superimpose source_array onto target_array. Original arrays are
not modified. NOTE: Currently assumes 3-dimensional coordinates
t1,t2 are vectors from origin to centers of mass...
'''
# Check dimensions of input arrays
if len(target_array) != len(source_array):
print ("Error: arrays must be of same length for RMS fitting.")
raise ValueError
if len(target_array[0]) != 3 or len(source_array[0]) != 3:
print ("Error: arrays must be dimension 3 for RMS fitting.")
raise ValueError
nvec = len(target_array)
ndim = 3
maxiter = 200
tol = 0.001
# Calculate translation vectors (center-of-mass).
t1 = get_null()
t2 = get_null()
tvec1 = get_null()
tvec2 = get_null()
for i in range(nvec):
for j in range(ndim):
t1[j] = t1[j] + target_array[i][j]
t2[j] = t2[j] + source_array[i][j]
for j in range(ndim):
t1[j] = t1[j] / nvec
t2[j] = t2[j] / nvec
# Calculate correlation matrix.
corr_mtx = []
for i in range(ndim):
temp_vec = []
for j in range(ndim):
temp_vec.append(0.0)
corr_mtx.append(temp_vec)
rot_mtx = []
for i in range(ndim):
temp_vec = []
for j in range(ndim):
temp_vec.append(0.0)
rot_mtx.append(temp_vec)
for i in range(ndim):
rot_mtx[i][i] = 1.
for i in range(nvec):
for j in range(ndim):
tvec1[j] = target_array[i][j] - t1[j]
tvec2[j] = source_array[i][j] - t2[j]
for j in range(ndim):
for k in range(ndim):
corr_mtx[j][k] = corr_mtx[j][k] + tvec2[j]*tvec1[k]
# Main iteration scheme (hardwired for 3X3 matrix, but could be extended).
iters = 0
while (iters < maxiter):
iters = iters + 1
ix = (iters-1)%ndim
iy = iters%ndim
iz = (iters+1)%ndim
sig = corr_mtx[iz][iy] - corr_mtx[iy][iz]
gam = corr_mtx[iy][iy] + corr_mtx[iz][iz]
sg = (sig**2 + gam**2)**0.5
if sg != 0.0 and (abs(sig) > tol*abs(gam)):
sg = 1.0 / sg
for i in range(ndim):
bb = gam*corr_mtx[iy][i] + sig*corr_mtx[iz][i]
cc = gam*corr_mtx[iz][i] - sig*corr_mtx[iy][i]
corr_mtx[iy][i] = bb*sg
corr_mtx[iz][i] = cc*sg
bb = gam*rot_mtx[iy][i] + sig*rot_mtx[iz][i]
cc = gam*rot_mtx[iz][i] - sig*rot_mtx[iy][i]
rot_mtx[iy][i] = bb*sg
rot_mtx[iz][i] = cc*sg
else:
# We have a converged rotation matrix. Calculate RMS deviation.
vt1 = translate_array(negate(t1),target_array)
vt2 = translate_array(negate(t2),source_array)
vt3 = transform_array(rot_mtx,vt2)
rmsd = 0.0
for i in range(nvec):
rmsd = rmsd + distance_sq(vt1[i], vt3[i])
rmsd = math.sqrt(rmsd/nvec)
return(t1, t2, rot_mtx, rmsd)
# Too many iterations; something wrong.
print ("Error: Too many iterations in RMS fit.")
raise ValueError
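# Illustrative usage (added sketch, not part of the original module): for a
# source set that is just the target set translated by +1 along x, fit()
# should return a near-identity rotation with rmsd ~ 0, and fit_apply()
# should map the source points back onto the target points.
#   >>> target = [[0.0,0.0,0.0], [1.0,0.0,0.0], [0.0,1.0,0.0]]
#   >>> source = [[1.0,0.0,0.0], [2.0,0.0,0.0], [1.0,1.0,0.0]]
#   >>> t1, t2, rot, rmsd = fit(target, source)
#   >>> list(fit_apply((t1, t2, rot), source))
# -> approximately [[0,0,0], [1,0,0], [0,1,0]], with rmsd ~ 0.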
|
SBRG/ssbio
|
ssbio/biopython/Bio/Struct/cpv.py
|
Python
|
mit
| 14,035
|
[
"PyMOL"
] |
b6d58e3df9db7f9d0c4fdadc50cd6967ae81472d7b274ff145a2d4c9ec895c3f
|
# coding: utf8
# Copyright 2014-2017 CERN. This software is distributed under the
# terms of the GNU General Public Licence version 3 (GPL Version 3),
# copied verbatim in the file LICENCE.md.
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
'''
Example input for simulating a ring with multiple RF stations
No intensity effects
:Authors: **Helga Timko**
'''
from __future__ import division, print_function
import numpy as np
from blond.input_parameters.ring import Ring
from blond.input_parameters.rf_parameters import RFStation
from blond.trackers.tracker import RingAndRFTracker
from blond.trackers.utilities import total_voltage
from blond.beam.beam import Beam, Proton
from blond.beam.distributions import bigaussian
from blond.beam.profile import CutOptions, Profile, FitOptions
from blond.monitors.monitors import BunchMonitor
from blond.plots.plot import Plot
import os
this_directory = os.path.dirname(os.path.realpath(__file__)) + '/'
try:
os.mkdir(this_directory + '../output_files')
except:
pass
try:
os.mkdir(this_directory + '../output_files/EX_04_fig')
except:
pass
# Simulation parameters -------------------------------------------------------
# Bunch parameters
N_b = 1.e9 # Intensity
N_p = 10001 # Macro-particles
tau_0 = 0.4e-9 # Initial bunch length, 4 sigma [s]
# Machine and RF parameters
C = 26658.883 # Machine circumference [m]
p_s = 450.e9 # Synchronous momentum [eV]
h = 35640 # Harmonic number
V1 = 2e6 # RF voltage, station 1 [eV]
V2 = 4e6 # RF voltage, station 2 [eV]
dphi = 0 # Phase modulation/offset
gamma_t = 55.759505 # Transition gamma
alpha = 1./gamma_t/gamma_t # First order mom. comp. factor
# Tracking details
N_t = 2000 # Number of turns to track
dt_plt = 200 # Time steps between plots
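# Added aside (rough numbers, not part of the original example): with
# gamma_t = 55.76 the momentum compaction is alpha = 1/gamma_t^2 ~ 3.2e-4,
# while 450 GeV/c protons have gamma ~ 480, so the slippage factor
# eta = alpha - 1/gamma^2 ~ 3.2e-4 - 4.3e-6 stays positive and the beam is
# above transition for this whole constant-momentum simulation.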
# Simulation setup ------------------------------------------------------------
print("Setting up the simulation...")
print("")
# Define general parameters containing data for both RF stations
general_params = Ring([0.3*C, 0.7*C], [[alpha], [alpha]],
[p_s*np.ones(N_t+1), p_s*np.ones(N_t+1)],
Proton(), N_t, n_sections = 2)
# Define RF station parameters and corresponding tracker
beam = Beam(general_params, N_p, N_b)
rf_params_1 = RFStation(general_params, [h], [V1], [dphi],
section_index=1)
long_tracker_1 = RingAndRFTracker(rf_params_1, beam)
rf_params_2 = RFStation(general_params, [h], [V2], [dphi],
section_index=2)
long_tracker_2 = RingAndRFTracker(rf_params_2, beam)
# Define full voltage over one turn and a corresponding "overall" set of
#parameters, which is used for the separatrix (in plotting and losses)
Vtot = total_voltage([rf_params_1, rf_params_2])
rf_params_tot = RFStation(general_params, [h], [Vtot], [dphi])
beam_dummy = Beam(general_params, 1, N_b)
long_tracker_tot = RingAndRFTracker(rf_params_tot, beam_dummy)
print("General and RF parameters set...")
# Define beam and distribution
bigaussian(general_params, rf_params_tot, beam, tau_0/4,
reinsertion = 'on', seed=1)
print("Beam set and distribution generated...")
# Need slices for the Gaussian fit; slice for the first plot
slice_beam = Profile(beam, CutOptions(n_slices=100),
FitOptions(fit_option='gaussian'))
# Define what to save in file
bunchmonitor = BunchMonitor(general_params, rf_params_tot, beam,
this_directory + '../output_files/EX_04_output_data',
Profile=slice_beam, buffer_time=1)
# PLOTS
format_options = {'dirname': this_directory + '../output_files/EX_04_fig', 'linestyle': '.'}
plots = Plot(general_params, rf_params_tot, beam, dt_plt, dt_plt, 0,
0.0001763*h, -450e6, 450e6, xunit='rad',
separatrix_plot=True, Profile=slice_beam,
h5file=this_directory + '../output_files/EX_04_output_data',
histograms_plot=True, format_options=format_options)
# For testing purposes
test_string = ''
test_string += '{:<17}\t{:<17}\t{:<17}\t{:<17}\n'.format(
'mean_dE', 'std_dE', 'mean_dt', 'std_dt')
test_string += '{:+10.10e}\t{:+10.10e}\t{:+10.10e}\t{:+10.10e}\n'.format(
np.mean(beam.dE), np.std(beam.dE), np.mean(beam.dt), np.std(beam.dt))
# Accelerator map
map_ = [long_tracker_1] + [long_tracker_2] + [slice_beam] + [bunchmonitor] + \
[plots]
print("Map set")
print("")
# Tracking --------------------------------------------------------------------
for i in np.arange(1,N_t+1):
print(i)
long_tracker_tot.track()
# Track
for m in map_:
m.track()
# Define losses according to separatrix and/or longitudinal position
beam.losses_separatrix(general_params, rf_params_tot)
beam.losses_longitudinal_cut(0., 2.5e-9)
# For testing purposes
test_string += '{:+10.10e}\t{:+10.10e}\t{:+10.10e}\t{:+10.10e}\n'.format(
np.mean(beam.dE), np.std(beam.dE), np.mean(beam.dt), np.std(beam.dt))
with open(this_directory + '../output_files/EX_04_test_data.txt', 'w') as f:
f.write(test_string)
print("Done!")
|
blond-admin/BLonD
|
__EXAMPLES/main_files/EX_04_Stationary_multistation.py
|
Python
|
gpl-3.0
| 5,450
|
[
"Gaussian"
] |
c3845388a5adbef015e48b8c4a3fbed904804929e2b5a3d0eef3ae3863031fdb
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2011 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import datetime
import gtk
from kiwi.currency import currency
from kiwi.datatypes import ValidationError
from kiwi.utils import gsignal
from stoqlib.api import api
from stoqlib.domain.account import Account, AccountTransaction
from stoqlib.gui.base.dialogs import run_dialog
from stoqlib.gui.editors.accounteditor import AccountEditor
from stoqlib.gui.editors.baseeditor import BaseEditor
from stoqlib.gui.editors.paymenteditor import get_dialog_for_payment
from stoqlib.lib.parameters import sysparam
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
class AccountTransactionEditor(BaseEditor):
""" Account Transaction Editor """
gladefile = "AccountTransactionEditor"
proxy_widgets = ['description', 'code', 'date', 'value', 'is_incoming']
model_type = AccountTransaction
model_name = _('transaction')
confirm_widgets = ['description', 'code', 'value']
gsignal('account-added')
def __init__(self, store, model, account):
self.parent_account = store.fetch(account)
self.new = False
BaseEditor.__init__(self, store, model)
payment_button = gtk.Button(_("Show Payment"))
payment_button.connect("clicked", self._on_payment_button__clicked)
box = self.main_dialog.action_area
box.pack_start(payment_button, False, False)
box.set_child_secondary(payment_button, True)
box.set_layout(gtk.BUTTONBOX_END)
# Setup the label, according to the type of transaction
account_labels = Account.account_labels[account.account_type]
self.is_incoming.set_label(account_labels[0])
self.is_outgoing.set_label(account_labels[1])
self.is_outgoing.set_active(self.model.source_account.id == account.id)
payment_button.set_sensitive(self.model.payment is not None)
payment_button.show()
def create_model(self, store):
return AccountTransaction(code=u"",
description=u"",
value=currency(0),
payment=None,
date=datetime.datetime.today(),
account=sysparam.get_object(store, 'IMBALANCE_ACCOUNT'),
source_account=self.parent_account,
operation_type=AccountTransaction.TYPE_OUT,
store=store)
def _populate_accounts(self):
accounts = self.store.find(Account)
self.account.prefill(api.for_combo(
accounts,
attr='long_description'))
def _get_account(self):
if self.model.account == self.parent_account:
return self.model.source_account
else:
return self.model.account
def setup_proxies(self):
self._populate_accounts()
self.add_proxy(self.model, AccountTransactionEditor.proxy_widgets)
self.account.select(self._get_account())
def validate_confirm(self):
return self.model.value != 0
def on_confirm(self):
account_transaction = self.model
is_incoming = self.is_incoming.get_active()
selected_account = self.account.get_selected()
parent_account = self.parent_account
if selected_account != account_transaction.get_other_account(parent_account):
account_transaction.set_other_account(parent_account, selected_account)
# Invert source and destination accounts. This is used to the source account
# represent the outgoing value.
if is_incoming and account_transaction.account != self.parent_account:
account_transaction.invert_transaction_type()
elif not is_incoming and account_transaction.source_account != self.parent_account:
account_transaction.invert_transaction_type()
def on_description__validate(self, entry, value):
if value is None:
return ValidationError(_("Description must be filled in"))
def on_value__validate(self, entry, value):
if value <= 0:
return ValidationError(_("Value must be greater than zero"))
def on_is_outgoing__toggled(self, *args):
if self.is_outgoing.get_active():
self.account_label.set_text(_(u"Destination:"))
else:
self.account_label.set_text(_(u"Source:"))
def _on_payment_button__clicked(self, button):
self._show_payment()
def on_add_account__clicked(self, button):
self._add_account()
def _show_payment(self):
dialog_class = get_dialog_for_payment(self.model.payment)
run_dialog(dialog_class, self,
self.store, self.model.payment)
def _add_account(self):
store = api.new_store()
parent_account = store.fetch(self.account.get_selected())
model = run_dialog(AccountEditor, self, store,
parent_account=parent_account)
if store.confirm(model):
account = self.store.get(Account, model.id)
self._populate_accounts()
self.account.select(account)
self.emit('account-added')
store.close()
def test(): # pragma nocover
creator = api.prepare_test()
account = creator.create_account()
retval = run_dialog(AccountTransactionEditor, None, creator.trans,
None, account)
creator.trans.confirm(retval)
if __name__ == '__main__': # pragma nocover
test()
|
tiagocardosos/stoq
|
stoqlib/gui/editors/accounttransactioneditor.py
|
Python
|
gpl-2.0
| 6,369
|
[
"VisIt"
] |
c6595d54e7295fcb1c90db6dbaf2d9cb0bdf779db3dfaef778279dcaebc3942a
|
import os
import sys
import urllib
import urllib2
import tarfile
import zipfile
import csv
import numpy as np
from ase.test import NotAvailable
from ase import units
from ase.test.tasks.dcdft import DeltaCodesDFTTask as Task
dir = 'Delta'
if len(sys.argv) == 1:
tag = None
reffile = os.path.join(dir, 'WIEN2k.txt')
else:
if len(sys.argv) == 3:
tag = sys.argv[1]
reffile = sys.argv[2]
else:
tag = sys.argv[1]
reffile = os.path.join(dir, 'WIEN2k.txt')
src = 'https://molmod.ugent.be/sites/default/files/Delta_v3-0_0.zip'
name = os.path.basename(src)
if not os.path.exists(dir): os.makedirs(dir)
os.chdir(dir)
if not os.path.exists('calcDelta.py'):
try:
resp = urllib2.urlopen(src)
urllib.urlretrieve(src, filename=name)
z = zipfile.ZipFile(name)
try: # new in 2.6
z.extractall()
except AttributeError:
# http://stackoverflow.com/questions/7806563/how-to-unzip-a-zip-file-with-python-2-4
for f in z.namelist():
fd = open(f, "w")
fd.write(z.read(f))
fd.close()
# AttributeError if unzip not found
except (urllib2.HTTPError, AttributeError):
raise NotAvailable('Retrieval of zip failed')
os.chdir('..')
task = Task(
tag=tag,
use_lock_files=True,
)
# header
h = ['#element', 'V0', 'B0', 'B1']
if not os.path.exists('%s_raw.csv' % tag):
# read calculated results from json file and write into csv
task.read()
task.analyse()
f1 = open('%s_raw.csv' % tag, 'wb')
csvwriter1 = csv.writer(f1)
csvwriter1.writerow(h)
for n in task.collection.names:
row = [n]
if n in task.data.keys():
try:
v = task.data[n]['dcdft volume']
b0 = task.data[n]['dcdft B0'] / (units.kJ * 1e-24)
b1 = task.data[n]['dcdft B1']
row.extend([v, b0, b1])
except KeyError: # completely failed to find eos minimum
row.extend(['N/A', 'N/A', 'N/A'])
else:
# element not calculated
row.extend(['N/A', 'N/A', 'N/A'])
if 'N/A' not in row:
csvwriter1.writerow(row)
f1.close()
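# Added note on the B0 rescaling above (my reading of ASE's unit system;
# double-check before relying on it): the EOS fit reports B0 in eV/Angstrom^3,
# and since 1 GPa = 1e-24 kJ/Angstrom^3, dividing by (units.kJ * 1e-24) -- one
# GPa expressed in eV/Angstrom^3 -- converts B0 to GPa.  Numerically
# 1.0/(units.kJ*1e-24) ~ 160.2, matching 1 eV/Angstrom^3 ~ 160.2 GPa.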
# read raw results
csvreader1 = csv.reader(open('%s_raw.csv' % tag, 'r'))
data = {}
for row in csvreader1:
if '#' not in row[0]:
data[row[0]] = {'dcdft volume': float(row[1]),
'dcdft B0': float(row[2]),
'dcdft B1': float(row[3])}
csvwriter2 = csv.writer(open('%s.csv' % tag, 'wb'))
h2 = h + ['%' + h[1], '%' + h[2], '%' + h[3]]
csvwriter2.writerow(h2)
refs = np.loadtxt(reffile,
dtype={'names': ('element', 'V0', 'B0', 'BP'),
'formats': ('S2', np.float, np.float, np.float)})
# convert into dict
refsd = {}
for e, v, b0, b1 in refs:
refsd[e] = [v, b0, b1]
rows = []
rowserr = []
for n in task.collection.names:
row = [n]
if n in data.keys():
if 0:
ref = task.collection.ref[n] # don't use collection data
else:
ref = refsd[n]
try:
v = round(data[n]['dcdft volume'], 3)
b0 = round(data[n]['dcdft B0'], 3)
b1 = round(data[n]['dcdft B1'], 3)
row.extend([v, b0, b1])
except KeyError: # completely failed to find eos minimum
row.extend(['N/A', 'N/A', 'N/A'])
else:
# element not calculated
row.extend(['N/A', 'N/A', 'N/A'])
if 'N/A' not in row:
v0, b00, b10 = ref
ve = round((v - v0) / v0 * 100, 1)
b0e = round((b0 - b00) / b00 * 100, 1)
b1e = round((b1 - b10) / b10 * 100, 1)
rows.append(row)
#print row + ref + [ve, b0e, b1e]
csvwriter2.writerow(row + [ve, b0e, b1e])
# calculate Delta
f = open('%s.txt' % tag, 'wb')
csvwriter3 = csv.writer(f, delimiter='\t')
for r in rows:
csvwriter3.writerow(r)
f.close()
cmd = 'python ' + os.path.join(dir, 'calcDelta.py')
cmd += ' ' + '%s.txt ' % tag + reffile + ' --stdout'
cmd += ' > ' + '%s_Delta.txt' % tag
os.system(cmd)
|
robwarm/gpaw-symm
|
gpaw/test/big/dcdft/analyse.py
|
Python
|
gpl-3.0
| 4,143
|
[
"ASE",
"WIEN2k"
] |
93ca74cea47cda1a6a00c50f58c270799441406640bb22c73f1a53ec63410e1f
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
### This program plots a channel's state variables / hinf, htau etc. as a function of voltage.
mechanisms = {
'H_STG': ['minf','mtau'],
'CaS_STG': ['minf','mtau','hinf','htau'],
'CaT_STG': ['minf','mtau','hinf','htau'],
'KA_STG': ['minf','mtau','hinf','htau'],
'Kd_STG': ['ninf','ntau'],
'Na_STG': ['minf','mtau','hinf','htau']
}
import sys
if len(sys.argv)<2:
print "Please print a channel name to be plotted from", mechanisms.keys()
sys.exit(1)
channel_name = sys.argv[1]
if channel_name in mechanisms:
mechanism_vars = mechanisms[channel_name]
else:
print "Undefined channel, please use one of", mechanisms.keys()
sys.exit(1)
import math
# The PYTHONPATH should contain the location of moose.py and _moose.so
# files. Putting ".." with the assumption that moose.py and _moose.so
# has been generated in ${MOOSE_SOURCE_DIRECTORY}/pymoose/ (as default
# pymoose build does) and this file is located in
# ${MOOSE_SOURCE_DIRECTORY}/pymoose/examples
try:
import moose
from moose.neuroml import *
except ImportError:
print "ERROR: Could not import moose."
print "Please add the directory containing moose.py in your PYTHONPATH"
import sys
sys.exit(1)
CELSIUS = 35 # degrees Centigrade
CML = ChannelML({'temperature':CELSIUS})
CML.readChannelMLFromFile('../channels/'+channel_name+'.xml')
from pylab import *
if __name__ == "__main__":
for varidx in range(len(mechanism_vars)/2): # loop over each inf and tau
var = ['X','Y','Z'][varidx]
gate = moose.element('/library/'+channel_name+'/gate'+var)
VMIN = gate.min
VMAX = gate.max
NDIVS = gate.divs
dv = (VMAX-VMIN)/NDIVS
# will use same VMIN, VMAX and dv for A and B tables.
vrange = array([VMIN+i*dv for i in range(NDIVS+1)])
figure()
plot(vrange*1000,gate.tableA/gate.tableB,'b-,') # Assume A and B have corresponding number of entries
xlabel('Voltage (mV)')
ylabel('steady state value')
title('state variable '+mechanism_vars[2*varidx]+' of '+channel_name+' vs Voltage (mV)')
figure()
plot(vrange*1000,1./gate.tableB*1000.,'b-,')
xlabel('Voltage (mV)')
ylabel('tau (ms)')
title('state variable '+mechanism_vars[2*varidx+1]+' of '+channel_name+' vs Voltage (mV)')
show()
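# Added note (based on the usual MOOSE HHGate convention; worth verifying for
# these ChannelML-derived gates): tableA holds alpha(V) and tableB holds
# alpha(V)+beta(V), so the steady-state value is A/B and the time constant is
# 1/B, which is what the two plots above display. For example, alpha = 2/ms and
# beta = 8/ms would give inf = 0.2 and tau = 0.1 ms. The factors of 1000
# convert the voltage axis from V to mV and tau from s to ms.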
|
h-mayorquin/camp_india_2016
|
tutorials/chemical switches/moose/neuroml/lobster_pyloric/channels/ChannelTest.py
|
Python
|
mit
| 2,394
|
[
"MOOSE"
] |
eade9d172f5c5d10ee18d2d7bfe320b38310b5d598b74375d00ebb66ca174df7
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Optimize 800 molecules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import os
import random
from absl import app
from absl import flags
from absl import logging
from baselines.common import schedules
from baselines.deepq import replay_buffer
import networkx as nx
import numpy as np
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
from rdkit.Chem import Descriptors
from rdkit.Contrib import SA_Score
from six.moves import range
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import gfile
from mol_dqn.chemgraph.mcts import deep_q_networks
from mol_dqn.chemgraph.mcts import molecules as molecules_mdp
from mol_dqn.chemgraph.tensorflow import core
flags.DEFINE_float('sim_delta', 0.0, 'similarity_constraint')
flags.DEFINE_integer('num_episodes', 50, 'episodes.')
flags.DEFINE_float('gamma', 0.999, 'discount')
FLAGS = flags.FLAGS
all_mols = [
r'COc1cc2c(cc1OC)CC([NH3+])C2',
r'C[C@@H]1CC[C@@H](C(N)=O)CN1C(=O)c1nnn[n-]1',
r'CC[NH+]1CC[C@@H](CNCc2ccc([O-])c[nH+]2)C1', r'OC[C@@H](Br)C(F)(F)Br',
r'CNC(=O)/C(C#N)=C(/[O-])C1=NN(c2cc(C)ccc2C)C(=O)CC1',
r'C[NH+](C)CCS[C@@H]1C[C@H](C(C)(C)C)CC[C@@H]1C#N',
r'CN(c1ncnc(N2CCN(c3cccc[nH+]3)CC2)c1[N+](=O)[O-])C1CC[NH+](C)CC1',
r'COc1cc(C[NH+]2CC[C@@H]([NH+]3CCCC3)C2)ccc1OCC(=O)N1CCCC1',
r'COCCN1C[C@@]23C=C[C@@H](O2)[C@H](C(=O)N(C)Cc2cnccn2)[C@H]3C1=O',
r'COc1ccc(/C=C2\SC(=O)N(CC(=O)NCC(=O)[O-])C2=O)cc1OC',
r'COCC[NH+]1CC[C@H]2CCCC[C@@H]2C1',
r'CCC[NH+](C1CCC([NH3+])CC1)[C@H]1CCOC1',
r'CC(C)CNC(=O)[C@H](C)[NH+]1CCCN(CC[NH3+])CC1',
r'CN(CC[C@@H]1CCC[C@]1(N)C#N)CC[NH+]1CCCC1',
r'OC[C@H]1C[NH+](Cc2ccccc2)CCC12OCCO2',
r'CC[C@@H](O)[C@@]1(C[NH3+])CCC[C@H](C)C1', r'OCc1cn2c(n1)OC(Cl)=CC2',
r'CCn1ccnc(N2CCCC[C@@H](N3CC[NH+](C)CC3)C2)c1=O',
r'COCCOC[C@H]1CC[NH+](C2C[C@@H](C)O[C@H](C)C2)C1',
r'NC(=O)C1(N2CCCC2)CC[NH2+]CC1',
r'CC[C@H](C)[NH+]1[C@@H](C(=O)[O-])CC[C@H]2CCCC[C@H]21',
r'C=CCn1c(C)nn(C[NH+]2CCC[C@H](C(=O)NCCC)C2)c1=S',
r'O=C(N[C@@H](C(=O)[O-])c1ccccc1)C1CCC(CNC(=O)[C@@H]2Cc3ccccc3C[NH2+]2)CC1',
r'COC[C@H](O)C[NH+]1CCC(C)(C)C1',
r'C[C@@H](C(=O)[O-])[C@@H](N[S@](=O)C(C)(C)C)C(C)(C)C',
r'O=c1n(CCO)c2ccccc2n1CCO',
r'Cc1ccc(C[NH+](C)[C@@H](C)C(=O)NCCc2ccc3c(c2)OCCO3)o1',
r'Cc1[nH+]cn(C[C@H](C)[C@H]2CC[NH+]3CCC[C@H]23)c1C',
r'O=C([O-])c1ccc(CNC(=O)c2cnns2)o1', r'C=CCC[C@@H](C)[NH+](C)CCc1nccs1',
r'C[C@H](Cn1ccnc1)NS(C)(=O)=O',
r'CNC(=O)[C@@H]1CCCN(S(=O)(=O)c2c(C)nn(CC(=O)NC(C)(C)C)c2C)C1',
r'CSCC(=O)NNC(=O)NC[C@@]1([NH+](C)C)CCC[C@H](C)C1',
r'CCNC(=O)c1cccc(NC(=O)C[NH+](C)CC)c1',
r'CCC[NH2+][C@@H]1COC[C@H]1C(=O)NCc1cscc1C',
r'C=C(C)CN/C(N)=[NH+]\Cc1ccc(C)cc1N(C)C',
r'CCO[C@@H]1C[C@@H]([NH+](C)C[C@@H]2CCCN(S(C)(=O)=O)C2)C12CCCCC2',
r'N#Cc1cn(C[C@H]2CCCC[C@H]2O)c(=O)nc1[O-]',
r'C[C@H](CSc1ccc(C(=O)N(C)C)cn1)C(=O)[O-]',
r'CC[C@H]1CN(C(=O)[C@@H]2CC[C@@H]3CCCC[C@@H]3[NH2+]2)CCN1C',
r'CCO[C@@H]1C[C@@H]([NH3+])[C@@H]1Nc1ncc(Cl)cc1F',
r'Cc1cnn(CCCNC(=O)N2CCCC[C@H](N3CC[NH+](C)CC3)C2)c1',
r'Cc1nn(C)c(CO[C@@H]2CCC[C@@H]([NH3+])C2)c1Cl',
r'C[C@@H]1CC[C@@H](C(=O)[O-])[C@H]2C(=O)N(c3ccccc3)C(=O)[C@@H]21',
r'Cc1ccc(NC(=O)C(=O)N2CC[C@H]([NH+]3CCCC3)C2)cc1C(=O)N(C)C',
r'CNc1ccccc1C(=O)N1CCN2C(=O)NC[C@H]2C1',
r'C[C@H]1[C@@H](C)SCC[NH+]1Cc1cccc2cn[nH]c12',
r'CC(C)[C@H](O)[C@]1(C[NH3+])CCc2ccccc21',
r'CCC[NH2+][C@]1(C(=O)OCC)CC[C@H](n2cc(Cl)c(C)n2)C1',
r'Cc1cc([C@H]2CCC[NH+]2CC(=O)NC(N)=O)no1',
r'COc1ccc(Cc2noc(C3CC[NH2+]CC3)n2)cn1',
r'Cc1ccc(C[NH+]2CCC(N3CCC(C(=O)N4CCOCC4)CC3)CC2)o1',
r'COc1ccccc1CC(=O)N1C[C@H]2CC[C@@H]1CN(S(C)(=O)=O)C2',
r'CC1=C(C(=O)C2=C([O-])C(=O)N(CC[NH+](C)C)[C@H]2c2cccc(Cl)c2)[C@H](C)N=N1',
r'CC(C)(C)OC(=O)N1CCc2cccc(C[NH+]3CC[C@H]([N+]4=CCCC4)C3)c21',
r'O=C(NC[C@@H]1CCC[NH+](Cc2ccccc2F)C1)c1nc[nH]n1',
r'Cc1ccc(CCN2C[C@]34C=C[C@H](O3)[C@H](C(=O)N3CC(O)C3)[C@H]4C2=O)cc1',
r'COc1cccc(C(=O)NCC[NH+](C)C2CCCCC2)c1F',
r'CC(C)[NH+]1CCN(CC(=O)NCCc2ccc(F)cc2)CC1',
r'C[C@@H](CO)NC(=O)NC[C@H]1Cc2ccccc2O1',
r'CC(C)(O)CC[NH2+][C@H]1CCCS(=O)(=O)C1',
r'CC1(C)C(=O)NCC[NH+]1Cc1ccc(OCC(F)F)cc1',
r'N#C[C@H]1CN(C(=O)[C@H]2CNCc3ccccc32)CCO1',
r'O=C(NC[C@@H]1CCC[NH+](CC2=c3ccccc3=[NH+]C2)C1)[C@H]1NN=C2C=CC=C[C@H]21',
r'COC(=O)c1cc(NC(=O)[C@H]2CC[NH2+][C@H]2C)ccc1F',
r'C[C@H]1[C@H](C(=O)[O-])CCN1S(=O)(=O)[C@@H](C)C#N',
r'O=C([O-])COc1ccccc1/C=N/NC(=O)C1CC1',
r'CC(C)c1nc(C(=O)N2CCC[C@@H]([NH+]3CCCC3)C2)n[nH]1',
r'Cn1cc[nH+]c1N1CCN(C[C@@H](O)c2cccc(Br)c2)CC1',
r'Cc1nn(C)c(C)c1-c1cc(C(=O)N[C@@H]2CC[C@H]([NH+](C)C)C2)n[nH]1',
r'C[C@H]1CN(C(=O)[C@@H]2CCS(=O)(=O)C2)C[C@H](C)O1',
r'CCOC(=O)C1=C(N)N(C)c2ccccc2[C@@]12C(=O)OC(C)=C2C(C)=O',
r'O=C(NC1CC1)[C@@H]1CCC[NH+](C2CCN(C(=O)c3ccc[nH]3)CC2)C1',
r'CC[C@](C)(NC(=O)[C@](C)(N)c1ccccc1)C(=O)[O-]',
r'CCc1noc(C)c1C[NH+](C[C@@H]1CCCCO1)C(C)C',
r'CCC(CC)([C@H](Cc1nc(C)cs1)NC)[NH+]1CCCC1',
r'CC(C)(C)CS(=O)(=O)N1Cc2nc[nH]c2C[C@H]1C(=O)[O-]',
r'C[NH2+][C@@]1(C(=O)[O-])CC[C@H](Sc2nccc(=O)[nH]2)C1',
r'COc1ncccc1C(=O)NC[C@H]1C[C@H](O)C[NH+]1Cc1ccccc1',
r'CC(C)C[C@@H](C[NH3+])c1nc(C2CCOCC2)no1',
r'O=C1N(C[NH+]2CCN(c3ccccc3)CC2)c2ccccc2C12O[C@@H]1CCCC[C@H]1O2',
r'CCOC(=O)C1(C#N)CC(OC)(OC)C1', r'[NH3+][C@@H](CSCc1nccs1)C(=O)[O-]',
r'CC(C)[C@@H]([NH2+][C@@H](C)CS(C)(=O)=O)c1cccnc1',
r'CCCN[C@]1(C#N)CC[C@H](N2C[C@H](C)OC[C@@H]2C)C1',
r'C[C@H](O)CC#CC[NH+]1CCC[C@H](c2cccnc2)C1',
r'CC(C)OCCS(=O)(=O)N[C@@H]1CCCCC[C@H]1[NH3+]',
r'CN1CCO[C@@H](CN(C)C2(C[NH3+])CCCCC2)C1',
r'Cc1c(C[NH+]2CCC[C@H]2c2ccc3c(c2)OCO3)cc(C#N)n1C',
r'CC[NH+]1CCC2(CC1)OC[C@H](C(=O)[O-])N2C(=O)c1ccc(F)cc1',
r'CC[NH+](CCNC(=O)N[C@H]1CC(=O)N(C(C)(C)C)C1)C(C)C',
r'CC[C@@H]([NH2+]CCN1CCCS1(=O)=O)c1ccc(OC)cc1',
r'CCCn1ncc(C[NH2+]C)c1C(F)(F)F',
r'C[C@H]1C[NH+]2CCCC[C@@H]2CN1C(=O)NC[C@@H](C)C(=O)[O-]',
r'C[C@@H]1CC[C@@H](O)[C@H]([NH+](C)CCOCC2CC2)C1',
r'[NH3+]C[C@H]1CCC[C@H]1S(=O)(=O)c1cccc(F)c1',
r'C[C@H]1[NH2+]CCC[C@@H]1NC(=O)c1cccc(OC(F)F)c1',
r'CC[C@H]1C[C@H](C)CC[C@@H]1[NH2+]CCCN1CCCC1=O',
r'CCN[C@@H]1[C@H]([NH+]2CCC[C@H]3CCC[C@@H]32)CCC1(C)C',
r'FC(F)n1ccnc1CN1CC[NH+](CCN2CCOCC2)CC1',
r'O[C@@H]1C[C@@H](c2nc(C3CC3)no2)[NH+](Cc2c[nH]c3ccccc23)C1',
r'Cc1ccc(-c2ccncc2)cc1NC(=O)C(=O)N[C@H]1CC[C@@H]([NH+](C)C)C1',
r'CC1CCC(C[NH3+])(NC(=O)N[C@H]2CCOC2)CC1',
r'O[C@H](C1CC[NH+](Cc2c(Cl)nc3ccccn23)CC1)C(F)(F)F',
r'C[N+]1(/N=C(\[S-])NN)CCOCC1',
r'NC(=O)CN1c2ccccc2C(=O)N[C@H]1c1cc(Cl)cc([N+](=O)[O-])c1[O-]',
r'C[C@@H](NC(=O)c1cc(C[NH+]2CCC(O)CC2)on1)c1cn(C)c2ccccc12',
r'Cc1cscc1C[NH2+][C@H](C)CS(C)(=O)=O',
r'C[C@@H](C(=O)C1=c2ccccc2=[NH+]C1)[NH+]1CCC[C@@H]1[C@@H]1CC=CS1',
r'CNS(=O)(=O)CC(=O)N[C@H]1CCCN(c2ccccc2)C1',
r'C[S@@](=O)c1ccc(C[NH+]2CCC(OC[C@H]3CCCO3)CC2)cc1',
r'CCN(CC)c1ccc(N)c(N)[nH+]1', r'C[NH2+]C[C@H]1C[C@H]1c1ccccc1Br',
r'C=CC(=O)OCC(C)(C)C[NH+](C)C',
r'COCc1cc([C@@H](C)NC2CC[NH+]([C@@H]3CCCC[C@@H]3O)CC2)ccc1OC',
r'C[C@@H]1CN(Cc2noc(-c3ccc(F)cc3)n2)CC[C@@H]1[NH3+]',
r'COc1ccc(OC)c([C@@H](O)Cc2[nH+]ccn2C)c1',
r'Cc1nnc(S[C@H](C)C(=O)N2CCOCC2)n1C', r'[NH3+][C@H](Cc1ccc(O)cc1)c1ncccn1',
r'CC[NH+]1CCN(C[C@@H](C)CNC(=O)NCc2sccc2C)CC1',
r'CC[C@@H](O)[C@@H]1CCCC[NH+]1Cc1nc2ccccc2n1CC',
r'C[C@H]1CN(S(=O)(=O)[C@@H](C)c2cnccn2)CC[NH2+]1',
r'O=C([O-])C1([C@@H]2CCCC[C@H]2O)CCOCC1',
r'C[NH2+][C@]1(C(=O)[O-])CCC[C@@H](OCC2CCCCC2)C1',
r'CC[NH+](CCO[C@H]1CCCCO1)CC1CC[NH2+]CC1',
r'CC(=O)N1CCc2cc(S(=O)(=O)N[C@@H](C(=O)[O-])C(C)C)ccc21',
r'COc1ccc(-c2ccc(C[NH2+][C@@H]3CC[C@H]([NH+](C)C)C3)o2)c([N+](=O)[O-])c1',
r'CC1(C)O[C@@H]2O[C@@H]3OC(C)(C)O[C@H]3[C@@H]2O1',
r'COc1ccccc1[C@@H]1C[NH+](Cc2cc(C(C)=O)cn2C)C[C@H]1C(=O)[O-]',
r'CCO[C@@H](C)c1noc(CN2CC[NH+]([C@H]3CCCc4ccccc43)CC2)n1',
r'CCN1CCN(C(=O)[C@H]2[C@@H]3C[C@H]4[C@H](OC(=O)[C@H]42)[C@H]3Cl)CC1',
r'CC(C)CNC(=O)[C@H](C)[NH+]1CCC[C@@H]1[C@@H]1CCCCC1=O',
r'CC[C@@H](CSC)[NH+](C)Cn1nc(-c2cccs2)[nH]c1=S',
r'CC(C)(C)c1ccc([C@@]2(C)C[NH+]=C(N)N2CC2CC2)cc1',
r'Cc1nnc(CCC[NH+]2CCC(CC[NH+]3CCCC[C@@H]3C)CC2)o1',
r'C[C@H]1C[C@H]([NH2+]Cc2ccccn2)CS1',
r'C=C(C)[C@@](C)(O)C#CC[NH+]1CCCC[C@@H]1c1cccnc1',
r'CC[C@](C)(C[NH3+])[C@H](O)c1ccc2c(c1)OCO2',
r'C[C@H]1CCCN(C(=O)C2C(C)(C)C2(C)C)[C@@H]1C[NH3+]',
r'O=[N+]([O-])c1ccc([C@@H]2OC[NH+]3COC[C@@H]23)cc1',
r'COC(=O)[C@@H]1NS(=O)(=O)c2ccsc2C1=O',
r'O=C(c1nnn[n-]1)N1CCC[C@@H]1[C@@H]1CCC[NH2+]1',
r'O=C([O-])CC1=C(C(=O)[O-])CCCC1', r'Cn1cc(C(=O)NCCc2ccccc2)c(C(=O)[O-])n1',
r'Cc1nn(C)c(C)c1CN[C@H]1CCC[NH2+]C1',
r'CC(C)[C@H]1C[NH2+]CC[C@]12CCO[C@H](C)C2',
r'COc1ccc(S(=O)(=O)N(CC(=O)N2CC[NH2+]CC2)C(C)C)cc1',
r'CCC1(CO)CC[NH+](Cc2cc(OC)c(O)cc2Br)CC1',
r'CCc1nc2n(n1)CCC[C@@H]2N[C@H]1CCN(C2CC2)C1=O',
r'CC(C)C[C@H](C[NH+](C)C)Nc1ncncc1N',
r'Oc1ccccc1/C=[NH+]/CCC/[NH+]=C/c1ccccc1O',
r'CC(C)C[C@](C)(O)CNC(=O)C1CCC(C[NH3+])CC1',
r'CC(=O)Nc1ccc(CN2CC[NH+](C3CCCC3)[C@H](CCO)C2)cc1',
r'COc1cc(Cl)cc(CN2C[C@@H]3CCC[NH+]3C[C@H]2C)c1OC',
r'CN1CCN(c2ncc(C[NH2+]C(C)(C)C)s2)C(C)(C)C1=O',
r'CC1CCN(S(=O)(=O)c2ccc(C(=O)N3CCC[C@@H]3C(=O)[O-])cc2)CC1',
r'Cc1cc(NC(=O)C(=O)NC[C@H]2CC[NH+](C)C2)ccc1OC(C)C',
r'C#CC[NH2+]CC(=O)Nc1cccc(-c2nncn2C)c1',
r'NC(=O)c1cccc(CNC(=O)[C@@H]2C[C@H]3CC[C@@H]2O3)c1',
r'C/[NH+]=C(/NCc1cc(C)on1)NCc1ccccc1-n1ccnc1',
r'COc1ccccc1CC(=O)N[C@@H]1CS(=O)(=O)C[C@H]1Cl',
r'C[NH2+][C@@H](Cc1csc(C)n1)C(OC)OC', r'COc1c(F)cc([C@H]([NH3+])CO)cc1Cl',
r'Cc1ccc(-c2nc3nc(CN4CC[NH+](C)CC4)cc([O-])n3n2)cc1',
r'COc1ccc([N+](=O)[O-])cc1CN1CCC[NH+](CC(=O)N2CCCC2)CC1',
r'C[C@@H](c1cccc(-c2ccc(C3(O)CC[NH2+]CC3)cc2)c1)[NH+]1CCCC1',
r'CCC[C@@H](c1nnnn1C[C@@H]1CCCO1)[NH+]1CCN(c2cc(C)ccc2C)CC1',
r'CC[NH+](CC)C[C@H]1CC[NH2+]C1', r'C[C@@H]([NH3+])C(=O)N1CC[C@@H](O)C1',
r'C#CC(C)(C)NC[C@H]1CN(C)CCO1',
r'CCOC1CC[NH+](CC[C@@H](O)c2ccc(C)c(F)c2)CC1',
r'COC[C@@H](C)NC(=O)C[NH+]1CCc2sc(-c3csc(C)n3)cc2C1',
r'CCOc1cccc(NC(=O)/C(C#N)=C/[C@H]2C=c3ccccc3=[NH+]2)c1',
r'C[C@H](Cn1cccn1)[NH2+]CC(=O)N1C[C@H](C(N)=O)Oc2ccccc21',
r'C[C@@H](C[NH+]1CCCCC1)NC(=O)c1n[nH]c(C2CC2)n1',
r'C[C@H]1C[C@@H](C)C[NH+](C[C@@H](O)CO[C@@H]2CCC[C@H]2C)C1',
r'Cc1csc([C@H](C)NC(=O)CCC[NH+]2CCCCC2)n1',
r'O[C@]1(C[NH2+][C@@H]2CCN(CC(F)(F)F)C2)CCCc2ccccc21',
r'CCNS(=O)(=O)[C@@H]1CC[NH+](C[C@@H]2CCCc3ccccc32)C1',
r'Cc1nc(CCC(F)(F)F)[nH]c(=O)c1CCC(=O)[O-]',
r'COc1ncnc2c1nc([C@@H](C)Cl)n2[C@H](C)C(N)=O',
r'O=C([O-])[C@H]1C=C[C@H](NS(=O)(=O)c2ccc3c(c2)CCO3)C1',
r'CCC[C@H]1CCC[NH+](CCCS)CC1',
r'CC[S@](=O)[C@@H]1CCC[C@H](NC(=O)NNC(=O)C(C)(C)C)C1',
r'COc1cc(C[NH+]2CCC[C@]3(CCC(=O)N(C4CC4)C3)C2)cc(OC)c1',
r'Cc1ccccc1CC[NH+]1[C@H]2CC[C@@H]1CC(=O)C2',
r'C[C@@H](NC1CC[NH+]([C@H]2CCCC[C@@H]2O)CC1)c1ccsc1',
r'Cc1ccc([C@@](C)(O)CNC(=O)NC[C@@H](c2ccco2)[NH+]2CCCCC2)o1',
r'C[C@@H]1[C@H](C(=O)[O-])CCN1S(=O)(=O)c1ccc(F)c(Cl)c1',
r'COC(=O)/C=C/c1ccc[n+]([O-])c1',
r'[NH3+]C[C@H]1CC[C@@H](C(=O)N(C[C@@H]2CCCO2)[C@H]2CCSC2)O1',
r'N#Cc1nn(C(N)=O)c(N)c1C#N', r'C[C@H](CNC(=O)N1CCN(S(C)(=O)=O)CC1)N1CCOCC1',
r'[NH3+]C[C@@H](c1ccc(F)cc1)[C@@H]1CCS(=O)(=O)C1',
r'CC(C)CNC(=O)NC(=O)[C@H](C)[NH2+]CC1(N2CCSCC2)CCCC1',
r'[NH3+][C@@H](CO)c1cc(C(F)(F)F)cc([N+](=O)[O-])c1[O-]',
r'CCCCN1C(=O)[C@@H]2[C@H](CCC(=O)[O-])'
r'N[C@]3(C(=O)Nc4c(CC)cccc43)[C@@H]2C1=O',
r'Cc1cc(C)nc(NC(=[NH2+])Nc2ccc(S(=O)(=O)Nc3nccc(C)n3)cc2)n1',
r'Cn1cnnc1C[NH+]1CC[C@]2(CCCN(C3CCCC3)C2=O)C1',
r'CCN(CC(C)(C)O)C(=O)[C@H]1C[C@@H]2C=C[C@H]1C2',
r'CC(C)C(=O)NCC[NH2+][C@@H](C)C[C@@H]1CCCCC[NH2+]1',
r'CC(C)C[NH2+]C[C@H]1CCCO[C@H]1[C@H]1CC=CCC1',
r'Cc1nc2c(s1)[C@H]([NH+](C)Cc1cccn1C)CCC2',
r'COc1ccc([C@@H]2CN(C(=O)Cn3nc4ccccc4n3)C[C@H]2[NH+](C)C)cc1',
r'CC(=O)NCCn1c(SCC(=O)[O-])nc2cccnc21',
r'C[C@@H]([NH2+]C[C@@H](C)[S@](C)=O)c1ccc(F)c(F)c1',
r'C[NH2+]C1CCN(C(=O)C2(C)CCCC2)CC1',
r'CCC(CC)[C@H](CNC(=O)c1cnc2c(C)cccn2c1=O)[NH+](C)C',
r'C[NH2+]C[C@H](C)[C@H](C)n1cccn1',
r'CC(C)[NH+]1CCCN(C(=O)C(C)(C)c2cccc(C#N)c2)CC1',
r'CC(C)CN1CCO[C@H](CNC(=O)N2CCN(C(=O)[C@H]3C[C@H]3C)CC2)C1',
r'CNS(=O)(=O)[C@H]1CCC[NH+]([C@@H](C)c2ncc(C(C)(C)C)o2)C1',
r'Cc1[nH+]c(NC[C@@H](C)C[C@H](C)O)ccc1I',
r'COC(=O)C(C)(C)C[NH2+][C@@H]1C[C@H]1c1ccccc1',
r'C[C@@H]1SCCC[C@]1(C[NH3+])N1CCC(C[NH+](C)C)CC1',
r'CCC[NH2+][C@H](Cc1nn(C)c2ccccc12)c1ncc[nH]1',
r'O=C([O-])[C@@H]1C[C@@H]1C(=O)N1CC2(CCCC2)c2c(F)cccc21',
r'COc1cc(Br)c(C[NH2+]C[C@H](C)O)cc1OC',
r'COc1ccc(N2/C(=N/C(=O)CCCC(=O)[O-])S[C@@H]3CS(=O)(=O)C[C@H]32)cc1Cl',
r'CC[C@@H](C)[NH+]1CCN([C@H]2CC[C@H]([NH2+]C)C2)CC1',
r'CCCN1CC(=O)N2[C@@H](CC3=c4ccccc4=[NH+][C@@H]3[C@H]2c2ccccc2OC)C1=O',
r'Cc1noc(C[NH+]2CC[C@@H](OCCCc3ccccc3)C2)n1',
r'CCc1nn(C)c(C[C@]2(C[NH3+])CCCC(C)(C)[C@@H]2O)c1Cl',
r'COc1cc(NC(=O)C[C@@H]2C[NH2+]CCO2)cc(OC)c1',
r'COC1CC[NH+](CCNc2nccn(C)c2=O)CC1',
r'C[C@@H](O)c1cc(F)ccc1N(C)C[C@@H]1CCC[NH+]1C',
r'OCCC#Cc1cc(F)cc(C[NH+]2CCC[C@H]2CO)c1',
r'C[C@H]1[NH2+]CC[C@@H]1c1nncn1C1CCCCC1',
r'CC[NH+]1CCN(Cc2nc3nc(C)cc(N4CCCCCC4)n3n2)CC1',
r'COc1cc(C)ccc1NC(=O)N[C@H]1CC[C@H]([NH+](C)C)C1',
r'O=C(NCCCc1n[nH]c(=O)[nH]1)[C@H]1C[C@@H]1c1c(F)cccc1F',
r'CCN(CCOC)[C@]1(C[NH3+])CCCS[C@@H]1C',
r'CC(C)c1cc(NC(=O)C[NH+]2CCC[C@H]2c2cccs2)on1',
r'[NH3+][C@@H](Cc1cccc(F)c1F)[C@H]1CN2CCC[C@@H]2CO1',
r'C[C@H](CCO)[NH2+][C@H]1CCc2c(Br)cccc21',
r'CC(C)[C@@H](NC(=O)c1ccc(NS(C)(=O)=O)cc1)C(=O)[O-]',
r'COc1ccccc1[C@H](C)NC(=O)C[NH+](C)C1CCS(=O)(=O)CC1',
r'COCc1ccc(C[NH+](C)Cc2ccccc2O)o1',
r'COC(=O)c1sccc1NC(=O)[C@@H]1CC[NH2+][C@@H]1C',
r'COCCCS(=O)(=O)/N=C(\[O-])c1cnn(C(C)C)c1C',
r'[N-]=[N+]=NC[C@H](Nc1ccc(Br)cc1)C(=O)[O-]',
r'Cc1ccc([C@H](NC(=O)NC[C@H]2CC[NH+](C3CC3)C2)C2CC2)cc1',
r'Cc1ccc(F)cc1NC(=O)C(=O)NCCCn1cc[nH+]c1',
r'CC[NH+]1CCC[C@@H]1CNC(=O)N1CCC[C@H]([NH+](C)C)C1',
r'Cc1nsc(N2CCC[NH+](Cc3ncccc3C)CC2)n1',
r'Cc1nc(C[NH+]2CCC[C@@H]2CN2CCOCC2)cs1',
r'Cc1c(C[NH+]2CC[C@@H](N3CCOCC3)[C@H](O)C2)[nH]c2c(C)cccc12',
r'CCN[C@@H]1c2cc(OC)ccc2C[C@H]1[NH+](C)C(C)C',
r'COc1cc(C[NH+]2CC[C@@H](NCc3scnc3C)C2)cc(OC)c1',
r'CC[C@H](C)[C@H]1OCCC[C@H]1C(=O)[O-]',
r'Cc1c(Cl)cccc1S(=O)(=O)NC[C@@H](C)CN1CC[NH+](C)CC1',
r'CC(C)[C@@H]1C(=O)NCC[NH+]1Cc1cc(F)cc(F)c1',
r'C[C@@H](C[C@@H]1CCCC[NH2+]1)[NH2+]C[C@@H]1CCCC[NH+]1C',
r'[NH3+]C[C@](O)(CN1CC[NH+]2CCCC[C@H]2C1)C1CC1',
r'Cc1ccc([N+](=O)[O-])cc1NC(=O)C(=O)N1CC[C@H]([NH+]2CCCC2)C1',
r'CC(C)C[NH+](C)CCC(=O)[O-]', r'OCc1ccc(CN2CCCC[C@@H]([NH+]3CCCC3)C2)o1',
r'CCCn1nnnc1CN1CC[C@]2(C1)NC(=O)N(C(C)C)C2=O',
r'C[C@H]1CN(c2ccccc2C[NH2+]C2CCC(O)CC2)C[C@@H](C)O1',
r'CC[C@H](C)C[C@H](C)NC(=O)[C@@H]1CCC[NH2+][C@@H]1C',
r'COCC[N+]1=C(C)C[C@@H](C(=O)CSc2nccc([O-])n2)[C@H]1C',
r'CC[NH2+][C@H]([C@@H]1CN2CCC[C@@H]2CO1)[C@@H]1CCCC[C@H]1CC',
r'CCC(CC)([C@@H](Cc1ccc[nH+]c1N)NC)[NH+](C)C',
r'CN(C)C(=O)O[C@@H]1CCCC[C@H]1C[NH3+]',
r'Nc1ccc(Cl)c(S(=O)(=O)N2CCN3C(=O)NC[C@H]3C2)c1',
r'COc1ccc(C[NH+]2CCNC(=O)CC2)cc1OCC(C)C',
r'O=C(NCC[NH+]1CCN(C(=O)c2ccccc2[N+](=O)[O-])CC1)[C@H]1CC(=O)N(c2ccccc2)C1',
r'[NH3+][C@H](C(=O)[O-])[C@H](O)c1ccc(F)cc1',
r'CN(C)C(=O)C[C@H](C[NH3+])N1CCOCC1(C)C',
r'C[C@H](CN(C)C(=O)c1ccc(F)c(F)c1F)C(=O)[O-]',
r'CCc1ccc([C@@H](C)NC(=O)[C@H](C)[NH+]2CCc3n[nH]cc3C2)cc1',
r'CC(=O)C1=C([O-])C(=O)N(CCC2=c3ccccc3=[NH+][C@H]2C)[C@H]1c1ccccc1F',
r'[NH3+][C@H]1CCC[C@@H]1CCSc1n[nH]c(=O)n1C1CC1',
r'CC(C)Cc1nc(SCC(=O)NC[C@@H]2CCCO2)c2c(=O)n(C)c(=O)n(C)c2n1',
r'CC[NH+]1CCC[C@H](NC(=O)c2ccc(OC)c(O)c2)C1',
r'C[NH+]1CC[C@@H](NC(=O)NCCS(C)(=O)=O)[C@@H]1c1ccc(Cl)cc1',
r'CC(C)Cn1cc[nH+]c(N2C[C@H]3CC[C@@H]2C3)c1=O',
r'CCOC(=O)C1CCC(NC(=O)[C@@](C)([NH3+])CC)CC1',
r'C[C@H](CCCO)NC(=O)C[C@H]1CCS(=O)(=O)C1',
r'O=C(c1cc(COc2ccc(-n3cncn3)cc2)on1)N1CC[NH+]2CCC[C@H]2C1',
r'CC(C)[C@H]1CN(C(=O)CCC(N)=O)CCC[NH+]1Cc1ccc(F)cc1',
r'N#Cc1ccnc(N2CCC([NH2+]C[C@@H]3CCCO3)CC2)c1', r'[NH3+]CC(=O)c1ccc(F)cc1',
r'CNC(=O)[C@H](C)C[NH+](C)Cc1nnc2n(C)c(=O)c3cc(C)ccc3n12',
r'CC[NH+]1CCC[C@@H]1C[NH+](C)CCC(=O)[O-]',
r'CCNC(=O)CN(CC)C(=O)CC1([NH3+])CCC1',
r'CC1=C(CC[NH+]2CCC[C@@H](C(N)=O)C2)C(C)(C)CCC1',
r'COc1cccc(O[C@@H]2CC[C@H]([NH3+])C2)n1',
r'C[S@](=O)CC[NH2+]C/C=C/c1ccc(C#N)cc1',
r'CN[C@]1(C#N)CCC[C@H]([NH+](C)CCc2ccccn2)C1',
r'Cc1nn(-c2ccccc2)c(C)c1CNC(=O)[C@@H]1[C@@H](C(=O)[O-])[C@@H]2C=C[C@H]1C2',
r'CCC[C@H](C)NC(=O)C[NH2+]Cc1cscc1C',
r'C/[NH+]=C(/NCc1[nH+]ccn1CC(C)C)N1C[C@H]2CC=CC[C@@H]2C1',
r'CC(C)[NH2+]CC1CC[NH+](CCSc2ccccc2)CC1',
r'C[C@@H](Cc1[nH+]ccn1C)C[C@@H](C)Br',
r'C[C@H](SCC[NH3+])c1ccc(C(=O)[O-])o1',
r'Cc1ccc2ncc(C(=O)N(C)C3CC[NH+](C(C)C)CC3)n2c1',
r'COCCn1c2ccccc2n2c(=O)n(CC(=O)N(C)C)nc12',
r'CC1(C)CCC[C@@H]1NC(=O)COCC(=O)[O-]',
r'CC1=C[C@@H](C)[C@H]2C(=O)N([C@H](Cc3ccccc3)C(=O)[O-])C(=O)[C@H]2C1',
r'COC(=O)C[C@H](NC(=O)N1CC[NH+]2CCC[C@@H]2C1)C(=O)[O-]',
r'c1cnc2c(O[C@H]3CCC[NH2+]C3)cccc2c1',
r'C[C@@H]1CCC[NH+](C[C@@H](C)NC(=O)N[C@@H]2CCCC[C@@H]2n2cccn2)C1',
r'CCC[NH2+][C@H]1[C@H](S(=O)(=O)C(C)C)CCC1(C)C',
r'CN(CC[NH+](C)C)C(=O)C[C@H]1COCCN1C(=O)c1ccc2[nH]nnc2c1',
r'CC(C)CNC(=O)CNC(=O)[C@H]1CCC[NH+]1Cc1ccc(F)cc1',
r'CC1CCN(C(=O)C[NH+]2CCC[C@@H](c3nc4ccccc4o3)C2)CC1',
r'CCCCOc1ccccc1C[C@@H]([NH3+])C(=O)[O-]',
r'NC(=O)c1n[nH]c2ccc(NC(=O)C(=O)NCC[C@H]3C[C@H]4CC[C@@H]3C4)cc12',
r'O[C@H]1CCC2C1C1CC[C@H](O)C21', r'CCc1nn(C)c(C(=O)NC[C@@H](CC)CCO)c1N',
r'CC(C)CN1C(=O)C2(CCCC2)[NH2+]C12CCN(C(=O)c1ccoc1)CC2',
r'COc1ccc(NC(=O)[C@H]2C[C@H]3CC[C@@H]2O3)cn1',
r'CCOC(=O)N[C@@H]1CCCN(C(=O)NC[C@H]2CC[NH+](C3CC3)C2)C1',
r'CNc1nc(C2CCN(C(=O)Cc3ccccn3)CC2)[nH+]c2c1CN(C(C)=O)CC2',
r'C[NH+](C)[C@@H]1CCC[C@@H](NC(=O)NCc2cc(=O)[nH]c3ccccc23)C1',
r'C[NH+](C)[C@@H]1CC[C@@H](NC(=O)c2ccc3c(c2)CCCN3S(C)(=O)=O)C1',
r'CC(C)[C@H](CO)NS(=O)(=O)c1ccsc1C(=O)[O-]',
r'C[C@H]1CCC[C@H](NC(=O)CN2CC[NH+](C/C=C/c3ccco3)CC2)C1',
r'NC(=O)CN1CC2(CCC1=O)CC[NH+](Cc1ccnc3ccccc13)CC2',
r'C[NH+](CC(=O)N1CCOC1=O)CC1CC[NH2+]CC1',
r'CCCn1c(C[C@@]2(O)CCC[NH2+]C2)nc2ccccc21',
r'CC[NH2+][C@@]1(C(=O)OC)CCC[C@@H](Oc2ccccc2)C1',
r'CCCC[C@@H](C(=O)N1CCN(CC(=O)N2CCCC[C@@H]2C)CC1)N1CCCS1(=O)=O',
r'C[NH+](C)[C@@H]1CC[C@H](NC(=O)[C@@H]2CCCc3[nH]ncc32)C1',
r'CC(C)CCNC(=O)[C@@H](C)Oc1ccc(N)cc1C(=O)[O-]',
r'O=C(NCCCN1CCOCC1)c1nc(-c2cnccn2)no1',
r'CCC[NH2+][C@H](Cc1ccccc1)[C@@H]1CN(CC)CCO1',
r'Cn1nncc1C[NH+](CC1CCCCC1)C1CC1',
r'C[C@H]1C[C@H]([NH+]2CC[C@H](S(=O)(=O)NC3CC3)C2)CC(C)(C)C1',
r'NC(=O)[C@]1([NH2+]C2CC2)CC[C@H](Oc2cc(F)cc(F)c2)C1',
r'O=C(NC[C@H]1CCS(=O)(=O)C1)N1CCC[C@H]([NH+]2CCCC2)C1',
r'COc1ccc(OC)c([C@H]2CC[NH+](c3c([O-])c(=[OH+])c3=O)C2)c1',
r'CC(C)C[C@@H]([NH3+])C(=O)N1CC[C@H](C(=O)[O-])[C@@H]1C',
r'O=C(c1cccc(F)c1)N1CCCC[C@@H]1c1nc2c(c(=O)[nH]1)C[NH+](Cc1cccnc1)CC2',
r'CC1CCC(C#N)([C@H](O)C=O)CC1',
r'C=CCN(CC(=O)[O-])C(=O)[C@@H](C[NH3+])C(C)C',
r'CS[C@H]1CC[C@H](NC(=O)N[C@@H](C)Cn2cc[nH+]c2)C1',
r'CC(C)n1cc(S(=O)(=O)N2CCn3c(nn(C)c3=O)C2)cn1',
r'C[NH+](Cc1ccsc1)Cc1ccc(C(N)=O)cc1[N+](=O)[O-]',
r'CC(C)[C@H](C)[NH+]1Cc2cccc(NCc3cc(=O)n4ccsc4[nH+]3)c2C1',
r'O=C([O-])CCNC(=O)N[C@@H]1CCOC1', r'O=C([O-])COCCNC(=O)[C@@H]1CCCCO1',
r'CCCCn1nc(C)c(C[NH2+]C[C@@H](C)O)c1Cl',
r'O=C(CCCn1cncn1)N1CCC[C@@H](N2CCNC2=O)C1',
r'C[C@H]1CN(C(=O)NCc2ncnn2C)C[C@@H](C)O1',
r'CCc1[nH+]n(C)c2c1NC[C@]1(C)COC[C@H]1N2',
r'CCNC(=O)N1CC[C@@H]([NH2+][C@H](C)CC(=O)Nc2cccc(F)c2)C1',
r'CC(C)[NH+]1CCC(N2CC[NH+](Cc3c(F)ccc(F)c3F)C[C@@H]2CCO)CC1',
r'CC[NH+]1C[C@H](c2ccccc2)CC2(CCN(C(=O)c3ccon3)CC2)C1',
r'[NH3+]C1([C@H]2CCO[C@@]3(CCSC3)C2)CCCC1',
r'CC(C)([C@H](O)c1ccc(Cl)s1)[NH+]1CCCC1',
r'CC(C)N[C@@]1(C#N)CC[C@@H](N(C)C[C@@H]2CC[NH+](C)C2)C1',
r'CCC(=O)N1CCCC[C@@H]1C(=O)NC[C@@H](C1CC1)[NH+](C)C',
r'Cc1nc2n(n1)C[C@H]([NH2+]C[C@@H](O)CN(C)Cc1ccccc1)CC2',
r'C1=C[C@H]2C[C@@H]1C[C@H]2CN1CC[NH+](C2CCCCCC2)CC1',
r'COC(=O)c1sccc1C[NH+](C)CC(=O)N(C)C',
r'CCN(C)S(=O)(=O)c1ccc(C[NH2+]C)cc1F',
r'NC(=O)N1CCC[C@H](C(=O)N2CCC(C(=O)[O-])CC2)C1',
r'CCc1ccc(S(=O)(=O)NCC2([NH+](C)C)CCOCC2)s1',
r'Cc1nn(-c2ccccc2)c(C)c1CN1C[C@@H](C[NH+]2CCCC2)[C@@H](CO)C1',
r'CC1CCN(c2[nH+]cccc2C(=O)[O-])CC1',
r'CC(C)CN(CCC#N)C(=O)NC[C@@H]1CC[C@H](C(=O)[O-])O1',
r'CC(C)N1CC[C@H]([NH2+][C@H](C)CCc2ccc3c(c2)OCO3)C1=O',
r'Cc1nc(CCC[NH+]2CCC[C@H]2C(N)=O)cs1',
r'Cc1ccc(C)c([C@@H](O)[C@@H](C)[C@@H](C)C(=O)[O-])c1',
r'Cc1nn(C)c2ncc(NC(=O)NC[C@H](c3ccccc3)[NH+](C)C)cc12',
r'NC(=O)c1cccc(CNC(=O)[C@@H]2C[C@@H]3C=C[C@H]2C3)c1',
r'CN(C(=O)N[C@H]1CC[C@@H]([NH+](C)C)C1)[C@@H]1CCN(c2ccccc2F)C1=O',
r'O=C(Nc1cccnc1)C1=CC=CN2CCS(=O)(=O)N=C12', r'CCn1ncc(C[NH2+]C)c1C1CC1',
r'CC(C)[C@@H](ON1C(=O)c2ccccc2C1=O)C(=O)[O-]',
r'C[C@H]1CCCC[NH+]1C[C@@H]1CCC(C)(C)[C@@H]1[NH3+]',
r'CNC(=O)CN1CCN(C(=O)c2nc(C)n[nH]2)CC1',
r'Cc1ccccc1C[NH+](C)CC(=O)Nc1cccc(S(=O)(=O)/N=C2\CCCN2)c1',
r'C[C@H]1OCC[C@H]1C(=O)NCc1noc(C(C)(C)C)n1', r'CCOc1ncnc(S(=O)(=O)CC)c1N',
r'CC(=O)c1cn(CCC(=O)N[C@@H]2CCC[NH+](C)C2)c2ccccc12',
r'COC(=O)C(CC[C@@]1(C)[C@@H](C)CC=C[C@H]1O)C(=O)OC',
r'CCOc1ccc(NC(=O)c2ccc(N3C(=O)N4CCC5=c'
r'6ccccc6=[NH+][C@H]5[C@@]4(C)C3=O)cc2)cc1',
r'O=C(Cn1nnn(-c2cccs2)c1=O)NC[C@@H]1CN(Cc2ccccc2)CCO1',
r'[O-]c1nc(-c2cccnc2)nc2c1CC[NH+](Cc1ccnc(N3CCOCC3)n1)C2',
r'Cc1ccn2c(=O)c(C(=O)Nc3n[n-]c(C(F)(F)F)n3)cnc2c1',
r'O=c1[nH]nc([O-])n1/N=C/c1ccco1', r'N#CCCCNC(=O)[C@H]1CC[C@H](C[NH3+])O1',
r'NC(=O)[C@@H]1CCCN(C(=O)Cn2nc(-c3cccs3)oc2=O)C1',
r'CC(C)[C@H](CNC(=O)N[C@H]1C[C@@H]1C)N1CC[NH+](C)CC1',
r'CCCCCSc1nnc([O-])[nH]c1=O',
r'Cn1cc[nH+]c1C[C@H]1CCC[NH+](Cc2ncc(-c3ccccc3Cl)o2)C1',
r'Cc1nc(C)c(C[NH2+]C[C@H](C(C)C)N2CCOCC2)s1',
r'CC[C@H](C)[NH2+]CCc1n[nH]c(-c2ccco2)n1',
r'N#CC(C#N)=CNCCCN1C(=O)[C@@H]2[C@@H]3C=C[C@@H](C3)[C@H]2C1=O',
r'C/[NH+]=C(\NCC(C)(C)c1ccc(OC)cc1)N[C@H]1C[C@H]1C',
r'[NH3+][C@H]1CCC[C@@H]([NH+]2CC[C@@H](c3ccccc3)C2)C1',
r'COc1cccc([C@H]2C[NH2+]CC[NH+]2C)c1',
r'COCCC(=O)NC[C@@H]1CC[C@@H](C(=O)[O-])O1', r'C#CCNCC(=O)NC[C@@H](C)CO',
r'O[C@@H]1C[C@H](CNCc2cnn(-c3ccccc3)c2)[NH+](Cc2ccccc2)C1',
r'CN[C@@]1(C(N)=O)CC[C@@H](N2CC[C@H]([NH+]3CCCCC3)C2)C1',
r'CC(C)CC[NH+]1Cc2cccc(NC(=O)C(=O)NC[C@@H]3CC=CCC3)c2C1',
r'C[C@H](O)C[C@@H](Cc1cccc(Br)c1)C(=O)[O-]',
r'C[C@H]1Cc2cc(C(=O)C3=C([O-])C(=O)N(CCN4CCOCC4)[C@H]3c3ccco3)ccc2O1',
r'C[C@H]1OCC[C@H]1C(=O)N1CCO[C@H](C#N)C1',
r'N#CC1(C#N)[C@H](C(N)=O)[C@@H]1c1ccc(F)cc1',
r'COc1cc(NS(=O)(=O)c2ccc(N3C(=O)[C@H]4'
r'[C@@H]5C=C[C@@H](C5)[C@@H]4C3=O)cc2)nc(OC)n1',
r'COc1ccc(O[C@H]2CC[C@@]([NH2+]C(C)C)(C(=O)[O-])C2)cc1',
r'C[C@H]1CCC[C@@H]([NH+](C)CC(=O)N2CCC(C(=O)N3CCCC3)CC2)C1',
r'Cn1ccc(S(=O)(=O)N2CCO[C@H](CC(=O)[O-])C2)c1',
r'CC[C@](C)([NH3+])C(=O)N1CCOc2ccccc2C1',
r'CC(C)[NH+]1CCC(N2CCN(CC3=c4ccccc4=[NH+]C3)C[C@@H]2CCO)CC1',
r'Cn1c(=O)c(=O)n(CC(=O)N2CCC3(CC2)OCCO3)c2cccnc21',
r'CC[C@@H]1CN(S(=O)(=O)C(C)C)CC[C@@H]1[NH2+]Cc1cccc(C#N)c1',
r'C[NH+](C)CCNC(=O)C[C@@H]1C(=O)NCCN1Cc1c(F)cccc1Cl',
r'Cc1ccc(Cl)cc1N1CCN(C(=O)[C@@H]2CCC[NH2+]2)CC1',
r'C[C@H]1CC(N)=C(C#N)C1(C#N)C#N', r'CCC[NH2+]CC[NH+](C)CC(C)(C)O',
r'CCC[C@H](C[NH3+])[C@@]1(O)CCC[NH+](CC)CC1',
r'CCc1nc(CN2CC[NH+](Cc3cc(Cl)c(OC)c(OC)c3)CC2)no1',
r'O=C([O-])C[C@@H]1CN(c2ccc([N+](=O)[O-])cc2Cl)CCO1',
r'CC(C)(C)OC(=O)[C@@H]1N[C@H](C(=O)[O-])C(C)(C)S1',
r'CCC1CCC(C[NH2+][C@@H]2CCC[C@H]2[C@@H]2CCCC[NH2+]2)CC1',
r'COc1cc(C[NH2+][C@@H]2C[C@H]3CC[C@]2(C)C3(C)C)ccc1OCC(N)=O',
r'O[C@@]1(C[NH2+]C2CCN(C3CCCCC3)CC2)CCOC1',
r'NC(=O)[C@H]1C[NH2+]CCN1c1nc2c(Br)cccn2n1',
r'COc1ccc2[nH]cc([C@H](C(=O)[O-])[NH+]3CCN(Cc4cccc5ccccc45)CC3)c2c1',
r'Cc1cc(C)nc(CCNC(=O)C[C@@H]2C(=O)NCC[NH+]2Cc2ccccc2)n1',
r'Cc1nc(C[C@@H]([NH3+])[C@@H]2CN(C(C)C)CCO2)cs1',
r'CC[C@@H](CCO)C[NH2+][C@H]1C[C@H](C)c2c(C)ccc(O)c21',
r'COCCN1[C@@H](C)CN(C(=O)C[NH+](C)C2CC2)C[C@H]1C',
r'CC(C)C[C@@H](C[NH+](C)C)NC(=O)N1CCC([NH+]2CCCC2)CC1',
r'C[C@H](CCn1cncn1)[NH2+]Cc1ccn(C2CCCC2)n1',
r'C[C@@H](CO)C[NH2+]Cc1cn(C)nc1-c1ccccc1',
r'Cc1c(F)cc(N)cc1S(=O)(=O)NCC(N)=O',
r'CC(C)[NH+](C[C@@H](C)O)C1C[C@H](C)O[C@@H](C)C1',
r'O=C(NC1CCCCC1)C1CCN(C2=NC=NC3=NC=N[C@H]32)CC1',
r'Cc1ccc(C(=O)N[C@H]2CCC[NH2+][C@H]2C)cc1F',
r'Cc1cn2c([nH+]1)CC[C@H](NS(=O)(=O)N1CCO[C@H](C)C1)C2',
r'CCC(C)(C)NC(=O)[C@@H](C)Oc1ccc(C[C@H](C)[NH3+])nc1',
r'CCc1nn(C)cc1NC(=O)C(=O)N1CCN(CC(F)(F)F)[C@@H](C)C1',
r'CCOC[C@H](O)CSC1COC1', r'CCn1nc(C)c(Cl)c1C[NH2+]CC1(C)COC1',
r'O=C(CCNc1ccccc1[N+](=O)[O-])N1CCC[C@@H]([NH+]2CCCC2)C1',
r'COc1cc(OC)cc([C@H]2CC[NH+](CCC(F)(F)F)C2)c1',
r'C=Cn1cc(C[NH+]2CC[C@@H](CNC(=O)c3ccc(C#N)cc3)C2)cn1',
r'C=CC[NH+](CC(=O)[O-])[C@@H]1CCC[C@H](C(C)(C)C)CC1',
r'CCn1cc([C@H]2OCC[C@H]2C[NH2+]Cc2ccc(C)s2)cn1',
r'COC(=O)[C@H]1CCCCC[C@@H]1NC(=O)Cn1nc2ccccn2c1=O',
r'[NH3+]Cc1ccccc1CS(=O)(=O)N1CCN2CCC[C@@H]2C1',
r'O[C@H]1CCCC[C@@H]1[NH+]1CCN(Cc2cnn(-c3ccccc3)c2)CC1',
r'Fc1ccc2c(c1)[C@H]([NH2+]C1CC[NH+](CCN3CCOCC3)CC1)CC2',
r'O=C(OC[C@@H]1CC(=O)N(c2ccccc2)C1)[C@@H]1CCC[NH+]1Cc1ccccc1',
r'O=C1NCCN1[C@H]1CCC[NH+](Cc2cccc(-c3ccncc3)c2)C1',
r'CN1CCO[C@@H](Cn2cc(C[NH3+])c3cccnc32)C1',
r'OC1(C(F)(F)F)[C@H]2CC[C@@H]1C[NH+](Cc1ccccc1)C2',
r'CCCN(CC)c1cc[nH+]c(C(=O)[O-])c1',
r'CCO[C@@H]1C[C@@H]([NH+](C)CCn2c(=O)oc3cccnc32)C12CCCC2',
r'CCNS(=O)(=O)c1ccccc1N[C@H](C)C[C@@H]1CCC[NH2+]1',
r'COc1ccc(/C=C/C2=[NH+]CCN2)cc1',
r'CO[C@@H](C[NH2+][C@H]1CCN(C[C@@H]2CCCO2)C[C@@H]1C)c1ccc(F)cc1',
r'CC(C)c1nsc(NC[C@H](C2CC2)[NH+](C)C)n1',
r'Cc1cccc(C(C)(C)CNC(=O)NC[C@@H](C)C[NH+]2CCN(C)CC2)c1',
r'CCNS(=O)(=O)[C@@H]1CCN(C(=O)NC2CC=CC2)C1',
r'COC(=O)[C@@H](N)CC(=O)OC(C)(C)C',
r'C[C@@]1(c2ccc(C[NH3+])cc2)NC(=O)NC1=O',
r'CCOC(=O)[C@H](F)[C@@]1(O)CCC[NH+](C(C)C)CC1',
r'C[C@@H]1CN(S(=O)(=O)[C@@H]2CCC[NH2+]C2)C[C@H]1C',
r'O=C1NC(=S)NC(=O)C1=CNc1ccc([N+](=O)[O-])cc1O',
r'C[NH+]1CCN(CCCNC(=O)C(=O)c2ccc(Br)cc2)CC1',
r'[NH3+][C@@H](CC(=O)[O-])c1cccc2ccccc12',
r'C[C@@H](C1CC1)[NH+](CC(=O)Nc1nccs1)C1CC1',
r'C/[NH+]=C(/NCc1ccc([N+]2=CCCC2)cc1)N[C@H]1CC[C@@H](SC)C1',
r'C[NH2+]C[C@@H]1C[C@@H]1c1c(F)cccc1Cl',
r'C[C@H]1N=C(CCNC(=O)CCC2=c3ccccc3=[NH+]C2)CS1',
r'C[C@@H]1CCCC[C@H]1NC(=O)Cn1cnc([N+](=O)[O-])n1',
r'C[NH+](Cc1c[nH]nc1-c1ccccc1)C[C@@H](O)CN1CCOCC1',
r'CC(C)=CC[NH+]1CCC2(CC1)CN(CCN1CC[NH+](C(C)C)CC1)C(=O)O2',
r'C[C@@H]([C@@H](O)c1ccc2ncnn2c1)[N+](=O)[O-]',
r'CC[NH+](CC)[C@@](C)(CC)[C@@H](C)O', r'CCc1cc(Cn2cc(N)nn2)n(C)n1',
r'CC[C@H](C)C[NH+](C)[C@@H]1CC[C@@](CO)([NH2+]C)C1',
r'CC[NH+]1CCC[C@@H]1CN(C)S(=O)(=O)c1cccc(C[NH3+])c1',
r'CCC[NH+](CCC)[C@@H]1CCC[C@H]([NH2+]C)C1',
r'CC[C@H](C)[C@H](NC(=O)N1CCCCC1)C(=O)[O-]',
r'CC[NH+](CC)[C@@H](C)CNC(=O)N1C[C@@H](C)c2ccccc21',
r'Cn1ncc(SCC(=O)[O-])c(Cl)c1=O', r'Cc1ccc([C@@]23CCC(=O)N2CCC[NH2+]3)cc1',
r'C[C@H]1C[C@@H](NCCc2nc3cc(F)ccc3n2C)C[NH+]1C',
r'C[C@@H]1C[NH+](CCN2C(=O)NC(C)(C)C2=O)C[C@@H](C)S1',
r'CCN(CCO)C(=O)N[C@H]1CC(=O)N(C(C)(C)C)C1',
r'CCn1nnc2c(=O)n(CC(=O)NC3CC3)cnc21',
r'CCCC(=O)N[C@@H]1CCC[NH+](Cc2ncccc2C)C1', r'COC[C@](C)(O)C1(C#N)CCCC1',
r'CCC[C@H](C)N(C)S(=O)(=O)c1ccc(CC(=O)[O-])s1',
r'CC1(C)[C@H]2OCC[C@@H]2[C@H]1NC(=O)CCNC(=O)C12CC3CC(CC(C3)C1)C2',
r'CCN(C(=O)Cn1nc2n(c1=O)CCCCC2)[C@H]1CCS(=O)(=O)C1',
r'Cc1ccc(S(=O)(=O)N[C@H]2C=C[C@@H](C(=O)[O-])C2)cc1F',
r'CC[NH2+][C@H](Cc1ncnn1CC)[C@@H]1CN(C)CCO1',
r'CC[NH2+][C@@]1(C(=O)[O-])CC[C@@H](Oc2cccc(OC)c2)C1',
r'Cn1cc(S(=O)(=O)N2CCN(C(=O)c3ccccc3O)CC2)cc1C(N)=O',
r'CC[C@H](C)[C@@H]1CCCC[C@H]([NH2+]C)C1',
r'Cc1ccc(C[NH2+][C@H](C)CN2CCOC2=O)nc1',
r'O=C(CS[C@H]1NN=C(C[C@@H]2CCS(=O)(=O)'
r'C2)O1)C1=c2ccccc2=[NH+][C@@H]1c1ccccc1',
r'CCOc1cc(CN2CC[NH+]3CCCC[C@@H]3C2)ccc1OC',
r'CC[NH+](CC)[C@](C)(CC)[C@H](O)c1cscc1Br',
r'C[NH+]1CCC(NC(=O)c2ncoc2-c2ccccc2)CC1',
r'CC(C)(C)C(=O)N1CCC[NH+](C[C@H](O)COCc2ccccc2Cl)CC1',
r'C[C@]12CC[C@H]3[C@@H](CC[C@]4(O)C[C@@H]'
r'(O)CC[C@]34C=O)[C@@]1(O)CC[C@H]2C1=CC(=O)OC1',
r'CCC[NH2+][C@@H]1CC[C@@H](C)C[C@@H]1C[NH+]1CCC(CC)(CC)C1',
r'O=C(CNC(=O)c1ccc[nH]c1=O)Nc1ccon1',
r'C[NH+]1CCC2(CC1)CNC(=N)N2c1ccc(Cl)c(Br)c1',
r'C[NH2+]C[C@H](C)C(=O)Nc1snc(C)c1C(=O)[O-]',
r'CC[NH+](CC)C[C@@H]1CCN(C(=O)Cc2cc3ccccc3[nH]c2=O)C1',
r'Cc1c(Cl)ccc2c1NC(=O)[C@@]21[C@@H]2C(=O)'
r'N(c3ccc(F)cc3)C(=O)[C@@H]2[C@@H]2CCC[NH+]21',
r'Cc1ncccc1NC(=O)C(=O)NCC[NH+](C)C(C)C',
r'CC[NH+]1CCN(C(=O)C2CC[NH+](Cc3ccccc3F)CC2)CC1',
r'C[C@H]([NH2+]Cc1nc(Cc2ccccc2)no1)[C@@H](C)n1cccn1',
r'CC(=O)N[C@@H]1CCCN(C(=O)C[NH+](Cc2ccc(C)s2)C(C)C)C1',
r'C/[NH+]=C(/NCCc1cc(C)cc(C)c1)NCc1ncc(C)s1',
r'CC[C@H](C)N1CCN(C(=O)CC(=O)[O-])CC1', r'COC(=O)[C@H](C#N)C(F)(F)F',
r'CCC(CC)([C@H](O)COC(C)C)[NH+](C)C',
r'Cc1nn(C)c(C)c1CNC(=O)[C@@H]1C[NH2+]C[C@H]1C',
r'CC[C@@H]1C[C@@H](C)CC[C@@H]1[NH2+]CCNS(C)(=O)=O',
r'C[C@@H]1CCCC[C@H]1[NH+]1CCC([NH2+][C@@H]2CCCCC[C@@H]2CO)CC1',
r'CC1(C)C[NH+](C[C@@H](O)COCc2ccco2)C1(C)C',
r'CCCC[NH+]1CCC(NC(=O)C2CC=CC2)CC1',
r'CCn1nc(C)c(CNC(=O)[C@H]2[NH+]=c3ccccc3=C2NC(=O)c2cccc(C)c2)c1C',
r'COc1ccc(OC)c(S(=O)(=O)n2cc3c(=O)n(C)c(=O)n(C)c3n2)c1',
r'C[C@H]1CCC[C@@H](C)N1C(=O)[C@@H]1COCCO1',
r'[NH3+][C@@H](CCCCC(=O)[O-])c1ccc2[nH]c(=O)[nH]c2c1',
r'C=CC[NH+](CC(=O)[O-])[C@H](C)c1ccc(F)cc1O',
r'C[C@@]1(C(=O)[O-])CCCCCC[C@@H]1O',
r'COCCN1C(=O)CC[C@@H]2C[NH+](Cc3cc(C)ccc3C)CC[C@@H]21',
r'C[NH+](CC(=O)NC(C)(C)C)C1CCC2(CC1)OCCO2',
r'CN(C(=O)c1nnn[n-]1)[C@H]1CCC[C@H]1C[NH3+]',
r'COc1ccc(C[NH+]2CC[C@]3(O)CCCC[C@@H]3C2)cc1O',
r'CC[C@H](C)[C@@H](NC(=O)N1CC(=O)Nc2ccccc21)C(=O)[O-]',
r'Cc1cc(C)n([C@H]2CCC[NH2+]C2)n1',
r'CC[NH2+][C@H](Cc1ccccc1Cl)[C@H]1C[NH+](C)CCN1C',
r'CN(Cc1[nH+]ccn1C)c1cccc(F)c1C(N)=[NH2+]',
r'Cc1nc(CC(=O)NCCC[NH+]2CCCC[C@H]2C)cs1',
r'CS(=O)(=O)N1C[C@H](C(N)=O)Oc2ccccc21',
r'C[C@H]([NH2+]CC(=O)N(C)C)c1ccc(Cl)s1',
r'C=CCN(CC=C)C[C@@H]1CCC(C)(C)[C@H]1[NH2+]C',
r'COC[C@H](C)Cc1nnc(C[NH3+])o1',
r'COC(=O)[C@H]1C[C@H]2CCCC[C@@H]2N1S(=O)(=O)N1CCOCC1',
r'CC[C@H](C)[NH+](C)CCNC(=O)c1ccc2c(c1)nc(C)n2C',
r'C[C@@H]1CCC/C(=N/[NH+]=C(/[S-])NCc2ccccc2)C1',
r'CCOC[C@@H]1CC[NH+](CC(=O)Nc2nc3ccc(F)cc3s2)C1',
r'C[NH+](CCNC(=O)c1cccn1-c1nnc(N2CCCC2)s1)C1CCCCC1',
r'CCC(CC)[S@](=O)CCC(=O)[O-]', r'CCOCCCc1cnc(C[NH2+]C)o1',
r'Cc1cc(CCC(N)=O)cc([C@H]2CCC[NH+]2Cc2cccnc2)n1',
r'C[NH+](CCc1ccccc1)Cc1c([C@@H]2CCC[NH2+]C2)[nH]c2ncccc12',
r'NC(=O)C[C@H](NC(=O)[C@@H]1CCC[NH2+]1)C(=O)[O-]',
r'C[C@@H]1C[C@@H](C)C[NH+](CCNS(=O)(=O)c2ccc(C#N)cc2)C1',
r'O=C([O-])c1cc(S(=O)(=O)[N-]c2ccc(F)cc2C(=O)[O-])c[nH]1',
r'C/C=C(\C)[C@@H]1C=C[C@@H]2C[C@H](C)C[C@H](C'
r')[C@@H]2[C@@H]1C(=O)C1=C([O-])[C@H](C[C@](C)(O)C(=O)[O-])NC1=O',
r'CC(C)[C@@H](C)NC(=O)CSCC[NH3+]',
r'CN(Cc1noc(C2CC2)n1)[C@@H]1CCN(Cc2nnnn2C2CC2)C1',
r'C/[NH+]=C(/NCCC[NH+]1CCC(C)CC1)NC(C)C',
r'COCCN(C)C(=O)[C@H]1CCC[NH+]([C@H](C)c2cccc([N+](=O)[O-])c2)C1',
r'Clc1ccc([C@H](Cc2ccccn2)[NH2+]C[C@H]2C[NH+]3CCN2CC3)cc1',
r'CNC(=O)CN(C)c1cc(C[NH3+])ccn1',
r'C[C@H]([NH3+])[C@@H](CC(=O)[O-])c1ccccc1',
r'Cc1ccc([O-])c(CN2CCN(C(=O)c3ccccc3O)CC2)[nH+]1',
r'CC[C@@H]1C[NH+]2CCCC[C@@H]2CN1c1cc(C)ccc1F',
r'O=S(=O)(/N=C(\[O-])c1ccsc1)N1CCCC1',
r'CCOc1cc(NC(=O)[C@@H]2C=C[C@H]([NH3+])C2)ccc1OC',
r'CC[NH+]1CCN(Cc2cc(C(=O)NC[C@@H](O)c3ccccc3)no2)CC1',
r'CC[NH2+][C@@H]1CCCC[C@H]1SC[C@@H](C)CO',
r'CC[C@H](C)[NH+](C)CC(=O)N1CCNCC1',
r'CCC(CC)n1ccc(C[C@@H]([NH3+])[C@H]2C[NH+](C)CCN2C)n1',
r'CC[C@@H]([NH3+])[C@H](c1cc(Br)cs1)N(C)CCS(C)(=O)=O',
r'Cc1cc(C)c2c(n1)oc1c(N3CC[NH+](CC(N)=O)CC3)ncnc12',
r'CC[NH+]1CC[C@@H](N(C)C(=O)Nc2cnccc2C)[C@H](C)C1',
r'O=C(C[NH+]1CCC(C(=O)c2ccc(Cl)cc2)CC1)NC[C@H]1COCCO1',
r'Cc1ccc(S(=O)(=O)N2CCN(C(=O)[C@H]3CCCC[C@@H]3C(=O)[O-])CC2)cc1C',
r'CC(C)Nc1cccc(CNC(=O)N[C@@H]2CC[NH+](CC3CC3)C2)c1',
r'CCc1cc(C(=O)N2CC[C@](O)(CN3CCOCC3)C(C)(C)C2)cc(=O)[nH]1',
r'CC[NH+](C)CCNC(=O)N1CCC(CC(=O)[O-])CC1',
r'CCn1c(C)cc(CNCC[NH+]2CCCCC2)c1C',
r'C[C@H]1CCC[NH+](Cc2ccc(CNC(=O)NNC(=O)C(C)(C)C)cc2)C1',
r'C/[NH+]=C(/NCc1noc(C(C)(C)C)n1)N[C@@H](C)c1ccc(F)cc1F',
r'CC(=O)N[C@@H](C(=O)NC1COC1)C(C)C',
r'C[C@H]1CCCN(C(=O)C2CC[NH+](C[C@@H](O)c3ccc(F)cc3F)CC2)C1',
r'C[C@@H](C(=O)NCc1cccs1)N1CCn2c(nn(C)c2=O)C1',
r'C[C@H]1CO[C@H]([C@]2(C)OC[C@H](C)O2)O1',
r'CN1CCC[NH+](Cc2cc(Cl)ccc2N)CC1',
r'C[C@@H](C[C@@H]1CCCCC[NH2+]1)[NH2+]CC(C)(C)C',
r'COC1CC[NH+](CCNC(=O)NCC(C)(C)c2ccncc2)CC1',
r'COCCNCC1=N[C@H]2C(=N1)C(=O)N(C)C(=O)N2C',
r'COCCN1C(=O)c2ccccc2[C@@H](C(=O)[O-])[C@H]1c1[nH+]ccn1C',
r'CNC(=O)c1ccc(O[C@@H]2CCC[C@H]([NH3+])C2)nn1',
r'C[NH+]1CCC(C[NH+](C)C2CCC(c3ccc(O)cc3)CC2)CC1',
r'CN(c1ncncc1N)[C@H]1CCSC1', r'SCCCCn1cc[nH+]c1',
r'Cc1cc2c(cc1C)O[C@@H](C(=O)N(CC[NH+](C)C)Cc1ccco1)C2',
r'O=C(NCC1([NH+]2CCCCC2)CCOCC1)c1n[nH]c(C2CC2)c1Cl',
r'NC1=c2cc(Br)ccc2=[NH+]C1', r'CC1(C)CCCC[C@H]1[NH2+]Cc1c[nH]cn1',
r'CC(C)(CBr)CN1C(=O)C(C)(C)S1(=O)=O',
r'COc1ccc(S(=O)(=O)NCC[NH2+][C@H]2CCOC3(CCOCC3)C2)cc1',
r'COC[C@@](C)(O)CNC(=O)C(=O)NCC(C)C', r'CCC1(CC)C(=O)NC(=NNC(N)=S)NC1=O',
r'c1nnn(C23C[C@H]4C[C@H](CC(c5nc6c7cn[nH]c7ncn6n5)(C4)C2)C3)n1',
r'CCN[C@H]1CCCC[C@@H]1[NH+](C)Cc1cccc(C)n1',
r'Cc1ccc(NC(=O)C[NH+](C)C[C@@H](O)CN2CCOCC2)cc1F',
r'C[C@@H]([NH2+]C[C@H]1CC[C@H](C(N)=O)O1)c1ccc2c(c1)OCCCO2',
r'CCOC(=O)[C@@H]([NH3+])[C@@H](O)c1ccc(S(C)(=O)=O)cc1',
r'Cc1ccc(NC(=O)C[NH+]2CCSC(C)(C)C2)c([N+](=O)[O-])c1',
r'CCC[NH2+][C@H](c1[nH]cc[nH+]1)[C@H]1CCOC2(CCSCC2)C1',
r'C[NH2+][C@@H](C1CCCC1)[C@@H]1CCc2cccnc21',
r'COc1ccc(-c2noc(C[NH+]3CCCC[C@@H]3C(=O)[O-])n2)cc1OC',
r'C[NH2+][C@H](COC(C)C)c1ncccn1',
r'NC(=O)[C@H]1CCC[NH+](CCCNc2nccc(C(F)(F)F)n2)C1',
r'C[C@H](CC(=O)N1CCC(C(=O)[O-])CC1)N1C(=O)c2ccccc2C1=O',
r'CCC[C@@H]1CC[C@H](C[NH3+])[C@H]([NH+]2C[C@@H](C)C[C@@H](C)C2)C1',
r'CCOCC[NH2+]Cc1c(C)nn(C)c1C',
r'O=C(C[C@@H]1C[NH2+]CCO1)N[C@H]1C=CS(=O)(=O)C1',
r'NC(=O)CN1CCN([C@H]2CC(=O)N(CCc3ccccc3)C2=O)CC1',
r'COCCCn1c(C)c(C)c(C#N)c1NC(=O)C[NH+]1CC(C)(C)C1(C)C',
r'C[C@@H]1[C@@H](C(=O)[O-])CCN1C(=O)NC[C@@H]1COCCO1',
r'CC1(C[NH2+][C@@H]2CCc3c(O)cccc32)COC1',
r'CC(=O)CN1N=C(C)[C@@H](C)n2c1nc1c2c(=O)n(C)c(=O)n1C',
r'CCc1nc2n(n1)C[C@H]([NH2+]CCNS(=O)(=O)c1ccccc1)CC2',
r'CC(C)n1cnnc1[C@H]1CCC[NH+](CC2CCC2)C1',
r'C[C@H](CC[NH+](C)C)NC(=O)Cc1cccc(Cl)c1',
r'CS(=O)(=O)NCC[NH2+]Cc1ccc(C(=O)[O-])cn1',
r'COc1ccc(S(=O)(=O)N2CCCN(CC[NH+](C)C)CC2)cc1',
r'CC1=C([C@H](c2cccc([N+](=O)[O-])c2)c2c(C)n[nH]c2[O-])[C@@H](O)N=N1',
r'COc1ccc(N2CCn3c2nn(CC(N)=O)c(=O)c3=O)cc1',
r'C[C@H](NC(=O)NCC1([NH+](C)C)CCCCC1)c1nncn1C',
r'CCc1ccc(S(=O)(=O)N[C@@H]2CCCC[C@@H]2C[NH+](C)C)cc1',
r'Cn1cc(C[NH+]2CCN(CCC(N)=O)CC2)c(-c2cccc(Cl)c2)n1',
r'CCc1cc(C(=O)NCc2cc3n(n2)CCCN(S(C)(=O)=O)C3)on1',
r'O=C(NNC(=O)[C@H]1C[C@H]2CC[C@@H]1C2)C1=NN=C(c2ccccc2)C1',
r'CCOc1cc(C[NH+]2CCC[C@H](C(=O)N(CC)CC)C2)ccc1O',
r'CN(C[C@H]1C=c2ccccc2=[NH+]C1=O)C(=O)c1ccccc1',
r'CCc1cccc(C)c1NC(=O)C[C@H]1C[NH2+]CCO1',
r'CC(C)[C@H](C)[NH2+][C@H]1CCOC2(CCCC2)C1',
r'C[C@@](N)(C(=O)N1CCCC[C@@H]1CCC(=O)[O-])C(F)(F)F',
r'CC(=O)N1CC[C@@H]([NH2+][C@H](C)c2ccc3[nH]c(=O)[nH]c3c2)C1',
r'Cc1cc([C@@H]([NH2+]CCN(C)C)C(=O)[O-])ccc1Br',
r'CCC[NH2+][C@@H](COC)[C@H]1CN(C(C)C)CCO1',
r'CC(C)Cn1ncc2cc(C(=O)N3CCOC[C@@H]3C(N)=O)cnc21',
r'CCN(CC)C(=O)CN1CCC([NH+]2CCCCC2)CC1',
r'C[C@H]([NH+](C)Cc1cnc(Cl)s1)C(C)(C)C',
r'CCOc1ccc(C[NH+]2CCC[C@H]([C@H](O)c3nccn3C)C2)cc1OC',
r'C[C@H]1CCCN(C(=O)CS[C@@H]2[NH+]=c3ccc(Cl)cc3=[NH+]2)C1',
r'CC(=O)N1CCC[C@H](C(=O)N(CC(=O)[O-])CC(F)(F)F)C1',
r'O=C1NC([O-])=C2C[NH+](Cc3ccco3)CN=C2N1c1ccccc1F',
r'CO[C@H]1CCCN(C(=O)NCCC[NH+]2CCCCC2)C1',
r'Cc1ccc(NC(=O)[C@H](C)[NH+](C)Cc2nnc(C3CC3)n2C)c(C)c1',
r'C[NH+](C)[C@H]1CC[C@H](NC(=O)N2CCN(Cc3ccncc3)CC2)C1',
r'NC1=C(N)C(=O)c2ncccc2C1=O', r'CC(C)[C@@H](NC(=O)[C@H]1CCCO1)C(=O)[O-]',
r'CCc1csc([C@H]2CCC[NH+](CC(=O)N(C)OC)C2)n1',
r'CC(C)c1ccc(CNC(N)=[NH2+])cc1', r'Cc1cc(F)cc(S(=O)(=O)N(C)CC[NH+](C)C)c1',
r'O=C1C(=O)N(CC[NH+]2CCOCC2)[C@@H](c2cccc([N+](=O)[O-])c2)/C1=C(\O)c1cccs1',
r'CN(C[C@@H]1CC[NH+](C)C1)C(=O)NCc1ccnc(OCC(F)F)c1',
r'CN1CCC[NH+](C[C@@H]2CN(C(=O)c3ccc(O)cc3)C[C@@H]2CO)CC1',
r'CC/[NH+]=C(/NCc1nc(C)no1)N[C@H]1CCN(c2ccccc2)C1',
r'COc1ccc(C(=O)C2=C([O-])C(=O)N(CC[NH+](C)C)[C@H]2c2ccc(Cl)cc2)cc1Cl',
r'[NH3+][C@H](CO)c1ccc(N2CCOCC2)c(Cl)c1Cl',
r'COc1ccc(CNC(=O)CNC(=O)[C@]2(C)CN(S(C)(=O)=O)CC(=O)N2C)cc1',
r'O=C(C[NH+]1CCC(CO)CC1)NCc1cc(Br)cs1',
r'C[C@@H](c1nc([C@H]2CSCCO2)no1)N1CC[NH2+]CC1',
r'C[C@@H](C(=O)N(C)C)[NH+](CC(=O)[O-])C(C)(C)C',
r'COCC[C@]1(C)O[C@]1(C(=O)OC)C(C)C', r'CCn1c(CC2CC[NH2+]CC2)nn(CCO)c1=O',
r'CC1(C)CCC[C@]2(C[NH+]=C(N)N2c2cccc(Br)c2)C1',
r'CCCCC[NH+]1CCN(C(=O)N(C)C)CC1',
r'CNS(=O)(=O)c1cccc([C@@H](C)[NH2+]C[C@H](C)SC)c1',
r'Cn1c(=O)c2nc(C[NH+]3CCCCC3)[nH]c2n(C)c1=O',
r'CCO[C@H]1C(=O)O[C@H]([C@@H](O)CO)C1=O',
r'CC(C)C[C@H](NC(N)=O)C(=O)N[C@@H](C(=O)[O-])C(C)C',
r'Cc1n[nH]c(/N=C(\[O-])CNC(=O)c2ccccc2F)n1',
r'CC[C@H](CSC)N(C)C(=O)[C@@H](C)N(C)c1nccn2cnnc12',
r'CC1(C)CCC[C@@H](C[NH+](CCO)C2CCCCC2)C1=O',
r'CC[C@]1(C)NN(c2ccccc2)C([S-])=[NH+]1',
r'COC(=O)[C@@H]1[C@H](CBr)N1N1C(=O)c2ccccc2C1=O',
r'CC1=C(C(=O)[O-])N2C(=O)[C@@H](NC(=O)c3c(Br)c(C)nn3C)[C@H]2SC1',
r'Cc1nc(C[NH+](C)[C@H](C)c2ccc(C(=O)[O-])o2)cs1',
r'COC(=O)[C@@H](NC(=O)Cn1cnnn1)c1ccc(Cl)c(F)c1',
r'O=C([O-])[C@H]1CCCN(c2ccc([O-])nn2)C1',
r'CC[C@H](CC[NH3+])N1CCCN(CC(F)(F)F)CC1',
r'CCC[NH2+]CC/C=C(/C)[C@@H]1CCOC2(CCSCC2)C1',
r'O=C(CN1CCN(C(=O)[C@H]2CC(=O)N(c3ccc4c(c3)OCCO4)C2)CC1)N1CCOCC1',
r'COc1ccc([C@@](C)([NH3+])Cc2[nH+]ccn2C)cc1',
r'Cc1cscc1C[NH2+]C[C@@H](O)C[NH+]1CCCC1',
r'COc1cc(Br)ccc1[C@H]([NH3+])C(=O)[O-]',
r'CCOc1ccc(CN2CC[NH2+][C@H](C(=O)[O-])C2)c(OCC)c1C',
r'CCn1cc(C[NH+]2CCc3c(F)cc(F)cc3C2)cn1',
r'O=S(=O)(C1CC1)N1CCC([NH2+]Cc2ccncc2)CC1',
r'CCCCS(=O)(=O)[N-]c1ccc(NC(=O)[C@H]2CCC[NH+](C)C2)cc1',
r'Cc1sc(=O)n(CCC(=O)NC2CC(C)(C)[NH2+]C(C)(C)C2)c1C',
r'O=C(NC[C@H]1CC[C@@H](C(=O)[O-])O1)c1ccc(Br)c(F)c1',
r'C=CC[NH2+]CC(=O)N[C@H](C)c1c(C)noc1C',
r'CC1(C)C[C@@H]1NC(=O)[C@@](C)(N)C(F)(F)F',
r'CCOc1ccccc1/C=C1\Oc2c(ccc([O-])c2C[NH+]2CCN(C)CC2)C1=O',
r'c1nc(CCN2CCC[NH+]3CCCC[C@@H]3C2)cs1',
r'COc1ccc(Cc2nc(C)c(CC(=O)[O-])c(=O)[nH]2)cc1OC',
r'C[C@@H]1C[C@]2(C[NH+]=C(N)N2c2ccc(Cl)cc2)CS1',
r'C[C@H](O)[C@H](C)[C@H](C(=O)[O-])c1ccccc1Br',
r'CCCCS[C@H]1CCC[C@@](CO)([NH2+]C(C)C)C1',
r'C[NH+](C)C1([C@@H](N)c2cnccn2)CCCCCC1',
r'O=C(N[C@H]1CCS(=O)(=O)C1)C1CC[NH2+]CC1',
r'N#CCN1CCN(CC(=O)NC(=O)NC2CCCC2)CC1',
r'CC(C)[C@@H](C)C(=O)Nc1cnn(CC[NH+]2CCCCC2)c1',
r'[NH3+][C@@H]1CCCCC[C@@H]1c1nnc2c3[nH]cnc3ncn12',
r'Cc1ccccc1CC[NH+]1CCC[C@@H](C[NH3+])C1',
r'C[C@@H]1[NH+]=c2ccccc2=C1CCN1C(=O)[C@H]2CCCC[C@@H]2C1=O',
r'Cc1cc2oc(=O)cc(C[NH+]3CCC[C@@H]3CS(N)(=O)=O)c2cc1C',
r'COc1cc(OC)cc(OCCN[C@@H]2C[C@H](C)[NH+](C)C[C@H]2C)c1',
r'CCS[C@H]1CC[C@@H](NC(=O)N(C)CCN(C)C2CC[NH+](C)CC2)C1',
r'CCOc1cc(C[NH+]2CCN(C3CC3)C(=O)C2)cc(Cl)c1OC',
r'CC(C)C[C@H](C[NH3+])CN1CC[NH+](CC2CC2)CC1',
r'C[NH+](CCS(C)(=O)=O)CC(=O)N1CCC[C@H]2CCCC[C@@H]21',
r'c1cc(CN2CC[NH+](Cc3ccc4c(c3)OCCO4)CC2)no1',
r'C[C@@H]([NH3+])c1ccccc1O[C@H]1CCO[C@]2(CCSC2)C1',
r'CCC[NH2+]C1CCC(O)(Cc2nc(C)cs2)CC1',
r'C[C@@H](C#N)CN(C)C(=O)C1[C@H]2CCC[C@@H]12',
r'CC(C)OC(=O)[C@H](C)CNC(=O)N[C@H]1CC[C@H]([NH+](C)C)C1',
r'CCCC12C[C@@H]3C[C@@H](CC([NH3+])(C3)C1)C2',
r'CNC(=O)[C@@H]1C[NH2+]CCN(C(C)=O)C1',
r'CN1CCc2cc([C@H](CNC(=O)C(=O)Nc3ccccc3F)[NH+](C)C)ccc21',
r'Cc1cc(N2CC[C@H](C)[C@H](O)C2)nc(C)[nH+]1',
r'CCOC(=O)CN1C(=O)CS[C@@]12C(=O)Nc1ccccc12',
r'C[NH+](C)[C@@H]1CC[C@H](NC(=O)C2CCN(CC(F)(F)F)CC2)C1',
r'Cc1cc(C)cc(C(=O)N[C@H](C)C(=O)N2CCC3(CC2)[NH2+]CCC2=NC=N[C@@H]23)c1',
r'Cc1cc([N+](=O)[O-])cnc1Nc1cnn(CC(=O)NCCO)c1',
r'CCC(CC)(NC(=O)N[C@H]1CCCNC1=O)C(=O)[O-]',
r'COc1ccc2c(c1)=C[C@H](CN(c1ccc(C)c(C)c1)S(C)(=O)=O)C(=O)[NH+]=2',
r'C[C@@H](CS(C)(=O)=O)[NH2+][C@H]1CCCOc2c(Br)cccc21',
r'C[C@H]1CC[C@H](C(=O)[O-])[C@H]([NH+]2CCN3CCC[C@@H]3C2)C1',
r'O=c1nnc(-c2ccc([N+](=O)[O-])o2)c([O-])[nH]1',
r'C[C@H]1CC[C@H](C(N)=O)CN1C(=O)Cn1ncc(=O)c2ccccc21',
r'C[C@H](NC(=O)[C@H](C)N1CC[NH+](CCCO)CC1)c1ccc2c(c1)CCCC2',
r'CCCN[C@@]1(C#N)CC[C@@H](n2cc[nH+]c2CCC)C1',
r'CCC[NH2+][C@@H]1CCC[C@H]1CC[NH+]1CCCC(C)(C)CC1',
r'CCn1cc[nH+]c1[C@@H]1CCCN(C(=O)CSCC[NH+]2CCCC2)C1',
r'CCOC(=O)C1(C)CC[NH+](C[C@@H](O)c2ccccc2C)CC1',
r'C[C@H]1C[C@@H](N(C)CC(=O)N2CCOCC2)CC[NH+]1C',
r'CCCC[C@@H](C)NC(=O)[C@H]1CCC[NH2+][C@@H]1C',
r'CN(C(=O)N[C@@](C)(C(=O)[O-])C(F)(F)F)c1ccc(F)cc1',
r'[NH3+][C@@H]1C=C[C@H](C(=O)N2CCC[C@@H]2C(=O)N2CCOCC2)C1',
r'O=C(c1ccc(F)cc1)[C@H]1CCC[NH+](Cc2c[nH]cn2)C1',
r'C=C(C)C[NH+]1CCN(CC(=O)N2CCCc3ccccc32)CC1',
r'CCC(CC)[NH+](C)CCC(=O)NC(N)=O',
r'CN(C)c1ccc([C@H](CNC(=O)C(=O)Nc2ccccc2C#N)N2CC[NH+](C)CC2)cc1',
r'CC[NH+](CC)CCN1C(N)=[NH+]C[C@@]12CCCC(C)(C)C2',
r'COc1ccc(CNC(=O)NC[C@@H](C)[NH+]2CCc3sccc3C2)cn1',
r'Cn1ncc2c1CCC[C@H]2[NH2+][C@H]1CCN(c2ccc(Cl)cc2)C1=O',
r'CC1(C)CCCC[C@H]1[NH+]1CCCC[C@@H]1CC[NH3+]',
r'CCN(Cc1ccccn1)[C@@H]1CCC[C@H]([NH2+]C)C1',
r'Cc1ccc(C[NH+]2CCN(c3nc4c(c(=O)[nH]c(=O)n4C)n3Cc3cccc(C)c3)CC2)cc1',
r'CC(C)(C)C[NH+]1CCN(Cc2cccc3cccnc23)C[C@@H]1CCO',
r'C[NH+]1CCC(NC(=O)C(=O)Nc2ccc(OC3CCCC3)cc2)CC1',
r'CCC[NH+]1CCCN(Cc2c(Cl)nc3ccccn23)CC1',
r'O=C(N[C@H]1C=C[C@H](C(=O)[O-])C1)c1cc(F)c(Cl)cc1Cl',
r'C[C@H]1[C@H](C(=O)[O-])CC[NH+]1CC(=O)NC(C)(C)C',
r'CNc1nc([C@H]2CCCN(C(=O)CCc3ccccc3)C2)[nH+]c2c1CC[NH+](C)C2',
r'COCCOc1cccc(C[NH+]2CCC2(C)C)c1', r'Cc1ccc([C@@H](C)NC2=[NH+]CCC2)cc1',
r'CC(C)NC(=O)NC(=O)[C@@H](C)N1CC[NH+](CCc2ccccc2)CC1',
r'CC(C)[C@@H](C[NH+](C)C)C(=O)[O-]', r'CN1C[NH+](C)CC2=C1NCNS2(=O)=O',
r'CCC[NH+](CCC)[C@@H]1CCC(=O)C1',
r'CC[NH+](CC)[C@H](C)CNC(=O)Nc1ccc2c(c1)NC(=O)[C@H](C)O2',
r'Cc1ccoc1C[NH+]1CC[C@@H](C)[C@H](C)C1',
r'CC(C)(C)[NH2+]Cc1ncoc1[C@H]1CCCCO1',
r'O=C([O-])C12C[C@@H]3C[C@H](C1)CC(n1cc([N+](=O)[O-])cn1)(C3)C2',
r'CC[C@H](C)[C@H](C)[NH2+]Cc1ncccc1F',
r'CC1(C)CCC(O)(C[NH2+][C@@H]2CCOC3(CCC3)C2)CC1',
r'C[C@@H]1CCC[NH+](CCCCNC(=O)Nc2ccccn2)C1',
r'CC[NH+]1CCC[C@@]2(CC1)C[NH+]=C(N)N2c1ccc(C)cc1',
r'CC1=C(C(=O)OCC(=O)C2=c3ccccc3=[NH+][C@@H]2C)[C@@H](C)N(C)N1',
r'CNC(=O)[C@H](C)CN(C)Cc1cc(=O)n2cccc(C)c2[nH+]1',
r'COc1ccc(Cl)cc1C[C@H]([NH3+])[C@H]1CN2CCC[C@@H]2CO1',
r'CC[S@](=O)[C@H]1CCCC[C@@H]1NC(=O)NC[C@H](O)c1ccco1',
r'CCOc1cc2c(cc1OCC)CN(C(=O)NC[C@@H]1CCC[NH+]1CC)CC2',
r'C[C@@H]1CCO[C@@H]1C(=O)N1CC[C@H](C(N)=O)c2ccccc21',
r'COC(=O)C[C@H](C)S(=O)(=O)C[C@@H]1CN(C)CCO1',
r'O=C(NCCC[NH+]1CCCC1)c1ccc2c(c1)NC(=O)[C@@H]1CCCCN21',
r'C[NH2+][C@@H]1CCC[C@H]([C@@H]2CCC[C@H](S(C)(=O)=O)C2)C1',
r'N#CCC[NH2+]C1(C(=O)[O-])CC1',
r'CC(=O)c1cccc(C[NH+]2CC[C@]3(CCC[NH+](Cc4cccc(C)c4)C3)C2)c1',
r'COC[C@@H](C)NC(=O)N[C@@H](C(N)=O)c1ccccc1',
r'C[C@@H]1CCC[C@@H]1[NH2+][C@@H]1CCCS[C@@H]1C',
r'NC(=O)CONC(=O)[C@H]1CCCc2sccc21',
r'CCn1c(=O)c2ccccc2n2c(CN3CC[C@H](C[NH+](C)CC)C3)nnc12',
r'CC[C@H]1C[C@H](C)CC[C@@H]1[NH2+][C@@H]1CCN(c2cc(C)nn2C)C1=O',
r'Cc1ccc(C(=O)NC[C@@H]2C[C@@H](O)C[NH+]2Cc2ccccc2)c(C)n1',
r'CC(C)CCc1noc(C[NH+](C)[C@H]2CCC[C@@H]2S(C)(=O)=O)n1',
r'CC(C)[C@@]1(CC2CCOCC2)CCC[NH2+]1',
r'CC[C@H](NC(=O)c1ccc(C#N)cn1)C(=O)N1CCOCC1',
r'CCC[NH+]1CCC(N(C)C(=O)NC[C@H]2CCCN(c3ncccn3)C2)CC1',
r'Cc1nc(-c2cccc(C(=O)N3C[C@@H]4[C@H](C3)C[NH+]3CCCC[C@H]43)c2)n[nH]1'
]
def num_long_cycles(mol):
"""Calculate the number of long cycles.
Args:
mol: Molecule. A molecule.
Returns:
negative cycle length.
"""
cycle_list = nx.cycle_basis(nx.Graph(Chem.rdmolops.GetAdjacencyMatrix(mol)))
if not cycle_list:
cycle_length = 0
else:
cycle_length = max([len(j) for j in cycle_list])
if cycle_length <= 6:
cycle_length = 0
else:
cycle_length = cycle_length - 6
return -cycle_length
def penalized_logp(molecule):
log_p = Descriptors.MolLogP(molecule)
sas_score = SA_Score.sascorer.calculateScore(molecule)
cycle_score = num_long_cycles(molecule)
return log_p - sas_score + cycle_score
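# Illustrative usage (not part of the original script; the SMILES string below is
# an arbitrary example): penalized_logp rewards a high logP while subtracting the
# synthetic-accessibility score and a penalty for rings longer than six atoms.
#   example = Chem.MolFromSmiles('CCOc1ccccc1')
#   if example is not None:
#     print(penalized_logp(example))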
class Molecule(molecules_mdp.Molecule):
"""Penalized LogP Molecule"""
def __init__(self, target_molecule, **kwargs):
"""Initializes the class.
Args:
      target_molecule: SMILES string. The target molecule against which we
calculate the similarity.
**kwargs: The keyword arguments passed to the parent class.
"""
super(Molecule, self).__init__(**kwargs)
target_molecule = Chem.MolFromSmiles(target_molecule)
self._target_mol_fingerprint = self.get_fingerprint(target_molecule)
def get_fingerprint(self, molecule):
"""Gets the morgan fingerprint of the target molecule.
Args:
molecule: Chem.Mol. The current molecule.
Returns:
      rdkit.ExplicitBitVect. The Morgan fingerprint of the molecule.
"""
return AllChem.GetMorganFingerprint(molecule, radius=2)
def get_similarity(self, molecule):
"""Gets the similarity between the current molecule and the target molecule.
Args:
      molecule: Chem.Mol. The current molecule.
Returns:
Float. The Tanimoto similarity.
"""
fingerprint_structure = self.get_fingerprint(molecule)
return DataStructs.TanimotoSimilarity(self._target_mol_fingerprint,
fingerprint_structure)
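  # Note (added for clarity): the similarity above is the Tanimoto coefficient
  # |A & B| / (|A| + |B| - |A & B|) evaluated on the Morgan fingerprints
  # returned by get_fingerprint.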
def _reward(self):
molecule = Chem.MolFromSmiles(self._state)
if molecule is None:
return -20.0
sim = self.get_similarity(molecule)
if sim <= FLAGS.sim_delta:
reward = penalized_logp(molecule) + 100 * (sim - FLAGS.sim_delta)
else:
reward = penalized_logp(molecule)
return reward * FLAGS.gamma**(self.max_steps - self._counter)
def get_fingerprint(smiles, hparams):
"""Get Morgan Fingerprint of a specific SMILES string.
Args:
smiles: String. The SMILES string of the molecule.
hparams: tf.HParams. Hyper parameters.
Returns:
np.array. shape = [hparams.fingerprint_length]. The Morgan fingerprint.
"""
if smiles is None:
return np.zeros((hparams.fingerprint_length,))
molecule = Chem.MolFromSmiles(smiles)
if molecule is None:
return np.zeros((hparams.fingerprint_length,))
fingerprint = AllChem.GetMorganFingerprintAsBitVect(
molecule, hparams.fingerprint_radius, hparams.fingerprint_length)
arr = np.zeros((1,))
# ConvertToNumpyArray takes ~ 0.19 ms, while
# np.asarray takes ~ 4.69 ms
DataStructs.ConvertToNumpyArray(fingerprint, arr)
return arr
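# Illustrative usage (not part of the original script; assumes hparams comes from
# deep_q_networks.get_hparams(), which presumably defines the fingerprint_length
# and fingerprint_radius used above):
#   hp = deep_q_networks.get_hparams()
#   fp = get_fingerprint(r'CCO', hp)  # np.array of shape (hp.fingerprint_length,)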
def run_training(hparams, dqn):
"""Runs the training procedure.
Briefly, the agent runs the action network to get an action to take in
the environment. The state transition and reward are stored in the memory.
  Periodically the agent samples a batch of transitions from the memory to
  update (train) its Q network. Note that the Q network and the action network
share the same set of parameters, so the action network is also updated by
the samples of (state, action, next_state, reward) batches.
Args:
hparams: tf.HParams. The hyper parameters of the model.
dqn: An instance of the DeepQNetwork class.
Returns:
None
"""
summary_writer = tf.summary.FileWriter(FLAGS.model_dir)
tf.reset_default_graph()
with tf.Session() as sess:
dqn.build()
    model_saver = tf.train.Saver(max_to_keep=hparams.max_num_checkpoints)
# The schedule for the epsilon in epsilon greedy policy.
exploration = schedules.PiecewiseSchedule(
[(0, 1.0), (int(FLAGS.num_episodes / 2), 0.1),
(FLAGS.num_episodes, 0.01)],
outside_value=0.01)
if hparams.prioritized:
memory = replay_buffer.PrioritizedReplayBuffer(hparams.replay_buffer_size,
hparams.prioritized_alpha)
beta_schedule = schedules.LinearSchedule(
FLAGS.num_episodes, initial_p=hparams.prioritized_beta, final_p=0)
else:
memory = replay_buffer.ReplayBuffer(hparams.replay_buffer_size)
sess.run(tf.global_variables_initializer())
sess.run(dqn.update_op)
global_step = 0
for episode in range(FLAGS.num_episodes):
for _ in range(800):
mol = random.choice(all_mols)
environment = Molecule(
target_molecule=mol,
atom_types=set(hparams.atom_types),
init_mol=mol,
allow_removal=hparams.allow_removal,
allow_no_modification=hparams.allow_no_modification,
allow_bonds_between_rings=hparams.allow_bonds_between_rings,
allowed_ring_sizes=set(hparams.allowed_ring_sizes),
max_steps=hparams.max_steps_per_episode)
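        # Note (added for clarity): the environment is initialized with the same
        # molecule that serves as the similarity target, so optimization starts
        # from that molecule and the reward penalizes drifting below the
        # similarity constraint while improving penalized logP.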
environment.initialize()
if hparams.num_bootstrap_heads:
head = np.random.randint(hparams.num_bootstrap_heads)
else:
head = 0
for step in range(hparams.max_steps_per_episode):
steps_left = (
hparams.max_steps_per_episode - environment.num_steps_taken)
valid_actions = list(environment.get_valid_actions())
observations = np.vstack([
np.append(get_fingerprint(act, hparams), steps_left)
for act in valid_actions
])
action = valid_actions[dqn.get_action(
observations,
head=head,
update_epsilon=exploration.value(episode))]
result = environment.step(action)
steps_left = (
hparams.max_steps_per_episode - environment.num_steps_taken)
action_fingerprints = np.vstack([
np.append(get_fingerprint(act, hparams), steps_left)
for act in environment.get_valid_actions()
])
# we store the fingerprint of the action in obs_t so action
# does not matter here.
memory.add(
obs_t=np.append(get_fingerprint(action, hparams), steps_left),
action=0,
reward=result.reward,
obs_tp1=action_fingerprints,
done=float(result.terminated))
if step == hparams.max_steps_per_episode - 1:
episode_summary = dqn.log_result(result.state, result.reward)
summary_writer.add_summary(episode_summary, global_step)
# reward can be a tuple or a float number.
logging.info(
'The SMILES string of the molecule generated: %s, '
            'the reward is: %s', result.state, str(result.reward))
if (episode > 1) and (global_step % hparams.learning_frequency == 0):
if hparams.prioritized:
(state_t, _, reward_t, state_tp1, done_mask, weight,
indices) = memory.sample(
hparams.batch_size, beta=beta_schedule.value(episode))
else:
(state_t, _, reward_t, state_tp1,
done_mask) = memory.sample(hparams.batch_size)
weight = np.ones([reward_t.shape[0]])
# np.atleast_2d cannot be used here because a new dimension will
            # always be added in the front and there is no way of changing this.
if reward_t.ndim == 1:
reward_t = np.expand_dims(reward_t, axis=1)
td_error, error_summary, _ = dqn.train(
states=state_t,
rewards=reward_t,
next_states=state_tp1,
done=np.expand_dims(done_mask, axis=1),
weight=np.expand_dims(weight, axis=1))
summary_writer.add_summary(error_summary, global_step)
logging.info('Current TD error: %.4f', np.mean(np.abs(td_error)))
if hparams.prioritized:
memory.update_priorities(
indices,
np.abs(np.squeeze(td_error) +
hparams.prioritized_epsilon).tolist())
global_step += 1
          if (global_step + 1) % (hparams.max_steps_per_episode * 5) == 0:
sess.run(dqn.update_op)
if (episode + 1) % 2 == 0:
model_saver.save(
sess,
os.path.join(FLAGS.model_dir, 'ckpt'),
global_step=global_step)
def main(argv):
del argv
if FLAGS.hparams is not None:
with gfile.Open(FLAGS.hparams, 'r') as f:
hparams = deep_q_networks.get_hparams(**json.load(f))
else:
hparams = deep_q_networks.get_hparams()
dqn = deep_q_networks.DeepQNetwork(
input_shape=(hparams.batch_size, hparams.fingerprint_length + 1),
q_fn=functools.partial(
deep_q_networks.multi_layer_model, hparams=hparams),
optimizer=hparams.optimizer,
grad_clipping=hparams.grad_clipping,
num_bootstrap_heads=hparams.num_bootstrap_heads,
gamma=hparams.gamma,
epsilon=1.0)
run_training(
hparams=hparams,
dqn=dqn,
)
core.write_hparams(hparams, os.path.join(FLAGS.model_dir, 'config.json'))
if __name__ == '__main__':
app.run(main)
|
google-research/google-research
|
mol_dqn/experimental/optimize_800_mols.py
|
Python
|
apache-2.0
| 54,010
|
[
"RDKit"
] |
f81a4421d3414ac46c52be2fc1c963f748f16efce740b88d3ef606794055c32b
|
#
# Copyright (C) 2001 greg Landrum
#
""" unit testing code for variable quantization
"""
import unittest
from rdkit.ML.Data import Quantize
from rdkit.six.moves import map
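# Note (added for clarity): FindVarQuantBound appears to return a
# (split_value, information_gain) tuple and FindVarMultQuantBounds a
# ([split_values], information_gain) tuple; the numeric targets in the tests
# below were presumably produced by a reference run of these functions.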
class TestCase(unittest.TestCase):
def testOneSplit1(self):
# """ simple case (clear division) """
d = [(1., 0), (1.1, 0), (1.2, 0), (1.4, 0), (1.4, 0), (1.6, 0), (2., 1), (2.1, 1), (2.2, 1),
(2.3, 1)]
varValues, resCodes = zip(*d)
nPossibleRes = 2
res = Quantize.FindVarQuantBound(varValues, resCodes, nPossibleRes)
target = (1.8, 0.97095)
self.assertEqual(
list(map(lambda x, y: Quantize.feq(x, y, 1e-4), res, target)), [1, 1],
'result comparison failed: %s != %s' % (res, target))
def testOneSplit2_noise(self):
# """ some noise """
d = [(1., 0), (1.1, 0), (1.2, 0), (1.4, 0), (1.4, 1), (1.6, 0), (2., 1), (2.1, 1), (2.2, 1),
(2.3, 1)]
varValues, resCodes = zip(*d)
nPossibleRes = 2
res = Quantize.FindVarQuantBound(varValues, resCodes, nPossibleRes)
target = (1.8, 0.60999)
self.assertEqual(
list(map(lambda x, y: Quantize.feq(x, y, 1e-4), res, target)), [1, 1],
'result comparison failed: %s != %s' % (res, target))
def testOneSplit3(self):
# """ optimal division not possibe """
d = [(1., 0), (1.1, 0), (1.2, 0), (1.4, 2), (1.4, 2), (1.6, 2), (2., 2), (2.1, 1), (2.2, 1),
(2.3, 1)]
varValues, resCodes = zip(*d)
nPossibleRes = 3
res = Quantize.FindVarQuantBound(varValues, resCodes, nPossibleRes)
target = (1.3, 0.88129)
self.assertEqual(
list(map(lambda x, y: Quantize.feq(x, y, 1e-4), res, target)), [1, 1],
'result comparison failed: %s != %s' % (res, target))
def testOneSplit4_duplicates(self):
# """ lots of duplicates """
d = [(1., 0), (1.1, 0), (1.2, 0), (1.2, 1), (1.4, 0), (1.4, 0), (1.6, 0), (2., 1), (2.1, 1),
(2.1, 1), (2.1, 1), (2.1, 1), (2.2, 1), (2.3, 1)]
varValues, resCodes = zip(*d)
nPossibleRes = 2
res = Quantize.FindVarQuantBound(varValues, resCodes, nPossibleRes)
target = (1.8, 0.68939)
self.assertEqual(
list(map(lambda x, y: Quantize.feq(x, y, 1e-4), res, target)), [1, 1],
'result comparison failed: %s != %s' % (res, target))
def testOneSplit5_outOfOrder(self):
# """ same as testOneSplit1 data, but out of order """
d = [(1., 0), (1.1, 0), (2.2, 1), (1.2, 0), (1.6, 0), (1.4, 0), (2., 1), (2.1, 1), (1.4, 0),
(2.3, 1)]
varValues, resCodes = zip(*d)
nPossibleRes = 2
res = Quantize.FindVarQuantBound(varValues, resCodes, nPossibleRes)
target = (1.8, 0.97095)
self.assertEqual(
list(map(lambda x, y: Quantize.feq(x, y, 1e-4), res, target)), [1, 1],
'result comparison failed: %s != %s' % (res, target))
def testMultSplit1_simple_dual(self):
# """ simple dual split """
d = [(1., 0), (1.1, 0), (1.2, 0), (1.4, 2), (1.4, 2), (1.6, 2), (2., 2), (2.1, 1), (2.1, 1),
(2.1, 1), (2.2, 1), (2.3, 1)]
varValues, resCodes = zip(*d)
nPossibleRes = 3
res = Quantize.FindVarMultQuantBounds(varValues, 2, resCodes, nPossibleRes)
target = ([1.3, 2.05], 1.55458)
self.assertEqual(
min(map(lambda x, y: Quantize.feq(x, y, 1e-4), res[0], target[0])), 1,
'split bound comparison failed: %s != %s' % (res[0], target[0]))
self.assertTrue(
Quantize.feq(res[1], target[1], 1e-4),
'InfoGain comparison failed: %s != %s' % (res[1], target[1]))
def testMultSplit2_outOfOrder(self):
# """ same test as testMultSplit1, but out of order """
d = [(1., 0), (2.1, 1), (1.1, 0), (1.2, 0), (1.4, 2), (1.6, 2), (2., 2), (1.4, 2), (2.1, 1),
(2.2, 1), (2.1, 1), (2.3, 1)]
varValues, resCodes = zip(*d)
nPossibleRes = 3
res = Quantize.FindVarMultQuantBounds(varValues, 2, resCodes, nPossibleRes)
target = ([1.3, 2.05], 1.55458)
self.assertTrue(
Quantize.feq(res[1], target[1], 1e-4),
'InfoGain comparison failed: %s != %s' % (res[1], target[1]))
self.assertEqual(
min(map(lambda x, y: Quantize.feq(x, y, 1e-4), res[0], target[0])), 1,
'split bound comparison failed: %s != %s' % (res[0], target[0]))
def testMultSplit3_4results(self):
# """ 4 possible results """
d = [(1., 0), (1.1, 0), (1.2, 0), (1.4, 2), (1.4, 2), (1.6, 2), (2., 2), (2.1, 1), (2.1, 1),
(2.1, 1), (2.2, 1), (2.3, 1), (3.0, 3), (3.1, 3), (3.2, 3), (3.3, 3)]
varValues, resCodes = zip(*d)
nPossibleRes = 4
res = Quantize.FindVarMultQuantBounds(varValues, 3, resCodes, nPossibleRes)
target = ([1.30, 2.05, 2.65], 1.97722)
self.assertTrue(
Quantize.feq(res[1], target[1], 1e-4),
'InfoGain comparison failed: %s != %s' % (res[1], target[1]))
self.assertEqual(
min(map(lambda x, y: Quantize.feq(x, y, 1e-4), res[0], target[0])), 1,
'split bound comparison failed: %s != %s' % (res[0], target[0]))
def testMultSplit4_dualValued_island(self):
# """ dual valued, with an island """
d = [(1., 0), (1.1, 0), (1.2, 0), (1.4, 1), (1.4, 1), (1.6, 1), (2., 1), (2.1, 0), (2.1, 0),
(2.1, 0), (2.2, 0), (2.3, 0)]
varValues, resCodes = zip(*d)
nPossibleRes = 2
res = Quantize.FindVarMultQuantBounds(varValues, 2, resCodes, nPossibleRes)
target = ([1.3, 2.05], .91830)
self.assertTrue(
Quantize.feq(res[1], target[1], 1e-4),
'InfoGain comparison failed: %s != %s' % (res[1], target[1]))
self.assertEqual(
min(map(lambda x, y: Quantize.feq(x, y, 1e-4), res[0], target[0])), 1,
'split bound comparison failed: %s != %s' % (res[0], target[0]))
def testMultSplit5_dualValued_island_noisy(self):
# """ dual valued, with an island, a bit noisy """
d = [(1., 0), (1.1, 0), (1.2, 0), (1.4, 1), (1.4, 0), (1.6, 1), (2., 1), (2.1, 0), (2.1, 0),
(2.1, 0), (2.2, 1), (2.3, 0)]
varValues, resCodes = zip(*d)
nPossibleRes = 2
res = Quantize.FindVarMultQuantBounds(varValues, 2, resCodes, nPossibleRes)
target = ([1.3, 2.05], .34707)
self.assertTrue(
Quantize.feq(res[1], target[1], 1e-4),
'InfoGain comparison failed: %s != %s' % (res[1], target[1]))
self.assertEqual(
min(map(lambda x, y: Quantize.feq(x, y, 1e-4), res[0], target[0])), 1,
'split bound comparison failed: %s != %s' % (res[0], target[0]))
def test9NewSplits(self):
d = [(0, 0),
(1, 1),
(2, 0), ]
varValues, resCodes = zip(*d)
res = Quantize._NewPyFindStartPoints(varValues, resCodes, len(d))
self.assertTrue(res == [1, 2], str(res))
res = Quantize._FindStartPoints(varValues, resCodes, len(d))
self.assertTrue(res == [1, 2], str(res))
d = [(0, 1),
(1, 0),
(2, 1), ]
varValues, resCodes = zip(*d)
res = Quantize._NewPyFindStartPoints(varValues, resCodes, len(d))
self.assertTrue(res == [1, 2], str(res))
res = Quantize._FindStartPoints(varValues, resCodes, len(d))
self.assertTrue(res == [1, 2], str(res))
d = [(0, 0),
(0, 0),
(1, 1),
(1, 1),
(2, 0),
(2, 1), ]
varValues, resCodes = zip(*d)
res = Quantize._NewPyFindStartPoints(varValues, resCodes, len(d))
self.assertTrue(res == [2, 4], str(res))
res = Quantize._FindStartPoints(varValues, resCodes, len(d))
self.assertTrue(res == [2, 4], str(res))
d = [(0, 0),
(0, 1),
(1, 1),
(1, 1),
(2, 0),
(2, 1), ]
varValues, resCodes = zip(*d)
res = Quantize._NewPyFindStartPoints(varValues, resCodes, len(d))
self.assertTrue(res == [2, 4], str(res))
res = Quantize._FindStartPoints(varValues, resCodes, len(d))
self.assertTrue(res == [2, 4], str(res))
d = [(0, 0),
(0, 0),
(1, 0),
(1, 1),
(2, 0),
(2, 1), ]
varValues, resCodes = zip(*d)
res = Quantize._NewPyFindStartPoints(varValues, resCodes, len(d))
self.assertTrue(res == [2, 4], str(res))
res = Quantize._FindStartPoints(varValues, resCodes, len(d))
self.assertTrue(res == [2, 4], str(res))
d = [(0, 0),
(0, 0),
(1, 0),
(1, 0),
(2, 1),
(2, 1), ]
varValues, resCodes = zip(*d)
res = Quantize._NewPyFindStartPoints(varValues, resCodes, len(d))
self.assertTrue(res == [4], str(res))
res = Quantize._FindStartPoints(varValues, resCodes, len(d))
self.assertTrue(res == [4], str(res))
d = [(0, 0),
(0, 0),
(1, 1),
(1, 1),
(2, 1),
(2, 1), ]
varValues, resCodes = zip(*d)
res = Quantize._NewPyFindStartPoints(varValues, resCodes, len(d))
self.assertTrue(res == [2], str(res))
res = Quantize._FindStartPoints(varValues, resCodes, len(d))
self.assertTrue(res == [2], str(res))
d = [(0, 0),
(0, 0),
(1, 0),
(1, 0),
(2, 0),
(2, 0), ]
varValues, resCodes = zip(*d)
res = Quantize._NewPyFindStartPoints(varValues, resCodes, len(d))
self.assertTrue(res == [], str(res))
res = Quantize._FindStartPoints(varValues, resCodes, len(d))
self.assertTrue(res == [], str(res))
d = [(0, 0),
(0, 1),
(1, 0),
(1, 1),
(2, 0),
(2, 0), ]
varValues, resCodes = zip(*d)
res = Quantize._NewPyFindStartPoints(varValues, resCodes, len(d))
self.assertTrue(res == [2, 4], str(res))
res = Quantize._FindStartPoints(varValues, resCodes, len(d))
self.assertTrue(res == [2, 4], str(res))
d = [(1, 0),
(2, 1),
(2, 1),
(3, 1),
(3, 1),
(3, 1),
(4, 0),
(4, 1),
(4, 1), ]
varValues, resCodes = zip(*d)
res = Quantize._NewPyFindStartPoints(varValues, resCodes, len(d))
self.assertTrue(res == [1, 6], str(res))
res = Quantize._FindStartPoints(varValues, resCodes, len(d))
self.assertTrue(res == [1, 6], str(res))
d = [(1, 1.65175902843, 0), (2, 1.89935600758, 0), (3, 1.89935600758, 1), (4, 1.89935600758, 1),
(5, 2.7561609745, 1), (6, 2.7561609745, 1), (7, 2.7561609745, 1), (8, 2.7561609745, 1),
(9, 3.53454303741, 1), (10, 3.53454303741, 1), (11, 3.53454303741, 1),
(12, 3.53454303741, 1), (13, 3.53454303741, 1)]
_, varValues, resCodes = zip(*d)
res = Quantize._NewPyFindStartPoints(varValues, resCodes, len(d))
self.assertTrue(res == [1, 4], str(res))
res = Quantize._FindStartPoints(varValues, resCodes, len(d))
self.assertTrue(res == [1, 4], str(res))
def testGithubIssue18(self):
d = [0, 1, 2, 3, 4]
a = [0, 0, 1, 1, 1]
_ = Quantize.FindVarMultQuantBounds(d, 1, a, 2)
d2 = [(x, ) for x in d]
self.assertRaises(ValueError, lambda: Quantize.FindVarMultQuantBounds(d2, 1, a, 2))
self.assertRaises(ValueError, lambda: Quantize._FindStartPoints(d2, a, len(d2)))
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
rvianello/rdkit
|
rdkit/ML/Data/UnitTestQuantize.py
|
Python
|
bsd-3-clause
| 10,913
|
[
"RDKit"
] |
c26a131ba442d42bfd2b4f29795da7c4267f647ee6dac360c4cb349431e6d442
|
"""Utilities that can be used by tests."""
import difflib
import re
import sys
from os import environ
import gi
gi.require_version("Gdk", "3.0")
gi.require_version("Gtk", "3.0")
from gi.repository import Gio
from gi.repository import Gdk
from gi.repository import Gtk
from macaroon.playback import *
testLogger = Gio.DBusProxy.new_for_bus_sync(
Gio.BusType.SESSION,
Gio.DBusProxyFlags.NONE,
None,
'org.gnome.Orca',
'/org/gnome/Orca',
'org.gnome.Orca.Logger',
None)
enable_assert = \
environ.get('HARNESS_ASSERT', 'yes') in ('yes', 'true', 'y', '1', 1)
errFilename = environ.get('HARNESS_ERR', None)
outFilename = environ.get('HARNESS_OUT', None)
if errFilename and len(errFilename):
myErr = open(errFilename, 'a', 0)
else:
myErr = sys.stderr
if outFilename and len(outFilename):
if outFilename == errFilename:
myOut = myErr
else:
myOut = open(outFilename, 'a', 0)
else:
myOut = sys.stdout
def getKeyCodeForName(name):
keymap = Gdk.Keymap.get_default()
success, entries = keymap.get_entries_for_keyval(Gdk.keyval_from_name(name))
if success:
return entries[-1].keycode
return None
def setClipboardText(text):
clipboard = Gtk.Clipboard.get(Gdk.Atom.intern("CLIPBOARD", False))
clipboard.set_text(text, -1)
class StartRecordingAction(AtomicAction):
'''Tells Orca to log speech and braille output to a string which we
can later obtain and use in an assertion (see AssertPresentationAction)'''
def __init__(self):
if enable_assert:
AtomicAction.__init__(self, 1000, self._startRecording)
else:
AtomicAction.__init__(self, 0, lambda: None)
def _startRecording(self):
testLogger.startRecording()
def __str__(self):
return 'Start Recording Action'
def assertListEquality(rawOrcaResults, expectedList):
'''Convert raw speech and braille output obtained from Orca into a
list by splitting it at newline boundaries. Compare it to the
list passed in and return the actual results if they differ.
Otherwise, return None to indicate an equality.'''
results = rawOrcaResults.strip().split("\n")
# Shoot for a string comparison first.
#
if results == expectedList:
return None
elif len(results) != len(expectedList):
return results
# If the string comparison failed, do a regex match item by item
#
for i in range(0, len(expectedList)):
if results[i] == expectedList[i]:
continue
else:
expectedResultRE = re.compile(expectedList[i])
if expectedResultRE.match(results[i]):
continue
else:
return results
return None
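# Illustrative sketch (not part of the original harness): assertListEquality
# first tries an exact list comparison and then falls back to treating each
# expected entry as a regular expression. The hypothetical helper below shows
# both paths on plain strings, without involving Orca or D-Bus at all.
def _exampleAssertListEquality():
    raw = "SPEECH OUTPUT: 'Hello world'\nBRAILLE LINE: 'Hello world'"
    # Exact match: None signals equality.
    assert assertListEquality(raw, ["SPEECH OUTPUT: 'Hello world'",
                                    "BRAILLE LINE: 'Hello world'"]) is None
    # Regex fallback: each expected line is compiled and matched in turn.
    assert assertListEquality(raw, ["SPEECH OUTPUT: 'Hello .*'",
                                    "BRAILLE LINE: 'Hello .*'"]) is None
    # A real mismatch returns the actual results so they can be diffed.
    assert assertListEquality(raw, ["SPEECH OUTPUT: 'Goodbye'"]) is not None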
class AssertPresentationAction(AtomicAction):
'''Ask Orca for the speech and braille logged since the last use
of StartRecordingAction and apply an assertion predicate.'''
totalCount = 0
totalSucceed = 0
totalFail = 0
totalKnownIssues = 0
def __init__(self, name, expectedResults,
assertionPredicate=assertListEquality):
'''name: the name of the test
expectedResults: the results we want (typically a list of strings
that can be treated as regular expressions)
assertionPredicate: method to compare actual results to expected
results
'''
# [[[WDW: the pause is to wait for Orca to process an event.
# Probably should think of a better way to do this.]]]
#
if enable_assert:
AtomicAction.__init__(self, 1000, self._stopRecording)
self._name = sys.argv[0] + ":" + name
self._expectedResults = expectedResults
self._assertionPredicate = assertionPredicate
AssertPresentationAction.totalCount += 1
self._num = AssertPresentationAction.totalCount
else:
AtomicAction.__init__(self, 0, lambda: None)
def printDiffs(self, results):
"""Compare the expected results with the actual results and print
out a set of diffs.
Arguments:
- results: the actual results.
Returns an indication of whether this test was expected to fail.
"""
knownIssue = False
print("DIFFERENCES FOUND:", file=myErr)
if isinstance(self._expectedResults, [].__class__):
for result in self._expectedResults:
if result.startswith("KNOWN ISSUE") \
or result.startswith("BUG?"):
knownIssue = True
else:
if self._expectedResults.startswith("KNOWN ISSUE") \
or self._expectedResults.startswith("BUG?"):
knownIssue = True
d = difflib.Differ()
try:
# This can stack trace for some odd reason (UTF-8 characters?),
# so we need to capture it. Otherwise, it can hang the tests.
#
diffs = list(d.compare(self._expectedResults, results))
print('\n'.join(list(diffs)), file=myErr)
except:
print("(ERROR COMPUTING DIFFERENCES!!!)", file=myErr)
for i in range(0, max(len(results), len(self._expectedResults))):
try:
print(" EXPECTED: %s" \
% self._expectedResults[i].decode("UTF-8", "replace"), file=myErr)
except:
pass
try:
print(" ACTUAL: %s" \
% results[i].decode("UTF-8", "replace"), file=myErr)
except:
pass
return knownIssue
def _stopRecording(self):
result = testLogger.stopRecording()
results = self._assertionPredicate(result, self._expectedResults)
if not results:
AssertPresentationAction.totalSucceed += 1
print("Test %d of %d SUCCEEDED: %s" \
% (self._num,
AssertPresentationAction.totalCount,
self._name), file=myOut)
else:
AssertPresentationAction.totalFail += 1
print("Test %d of %d FAILED: %s" \
% (self._num,
AssertPresentationAction.totalCount,
self._name), file=myErr)
knownIssue = self.printDiffs(results)
if knownIssue:
AssertPresentationAction.totalKnownIssues += 1
print('[FAILURE WAS EXPECTED - ' \
'LOOK FOR KNOWN ISSUE OR BUG? ' \
'IN EXPECTED RESULTS]', file=myErr)
else:
print('[FAILURE WAS UNEXPECTED]', file=myErr)
def __str__(self):
return 'Assert Presentation Action: %s' % self._name
class AssertionSummaryAction(AtomicAction):
'''Output the summary of successes and failures of
AssertPresentationAction assertions.'''
def __init__(self):
AtomicAction.__init__(self, 0, self._printSummary)
def _printSummary(self):
print("SUMMARY: %d SUCCEEDED and %d FAILED (%d UNEXPECTED) of %d for %s"\
% (AssertPresentationAction.totalSucceed,
AssertPresentationAction.totalFail,
(AssertPresentationAction.totalFail \
- AssertPresentationAction.totalKnownIssues),
AssertPresentationAction.totalCount,
sys.argv[0]), file=myOut)
def __str__(self):
return 'Start Recording Action'
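# Illustrative sketch (an assumption, not part of the original module): a test
# script would typically chain these actions on a macaroon sequence, driving
# the application between StartRecordingAction and AssertPresentationAction.
# MacroSequence and KeyComboAction are assumed to be provided by the
# `from macaroon.playback import *` wildcard import above.
def _exampleHarnessUsage():
    sequence = MacroSequence()
    sequence.append(StartRecordingAction())
    sequence.append(KeyComboAction("Down"))
    sequence.append(AssertPresentationAction(
        "Move to the next line",
        ["BRAILLE LINE: .*",
         "SPEECH OUTPUT: .*"]))
    sequence.append(AssertionSummaryAction())
    sequence.start()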
|
GNOME/orca
|
test/harness/utils.py
|
Python
|
lgpl-2.1
| 7,648
|
[
"ORCA"
] |
4b4f125f8e437f58db0206641de214d950b6540e900d804fd288f3cc2123e49d
|
import json
import os
import os.path
import re
import datetime
from os.path import exists, isdir, realpath, isfile, islink
from os import pathsep, listdir, environ, fdopen
import subprocess
import GangaCore.Utility.logging
import GangaCore.Utility.Config
from optparse import OptionParser, OptionValueError
from GangaCore.Utility.Config.Config import _after_bootstrap
from GangaCore.Utility.logging import getLogger
from GangaCore.Runtime.GPIexport import exportToGPI
from GangaCore.Utility.execute import execute
from GangaCore.GPIDev.Credentials.CredentialStore import credential_store
from GangaDirac.Lib.Credentials.DiracProxy import DiracProxy
from GangaLHCb.Utility.LHCbDIRACenv import store_dirac_environment
logger = getLogger()
def guessPlatform():
defaultPlatform = 'x86_64-centos7-gcc8-opt'
cmd = '. /cvmfs/lhcb.cern.ch/lib/LbEnv &> /dev/null && python3 -c "import json, os; print(json.dumps(dict(os.environ.copy())))"'
env = execute(cmd)
if isinstance(env, str):
try:
env = json.loads(env)
except Exception:
logger.debug("Unable to extract platform - using default platform: %s" % defaultPlatform)
return defaultPlatform
if 'CMTCONFIG' in env.keys():
defaultPlatform = env['CMTCONFIG']
logger.debug("Setting the default application platform to %s" % defaultPlatform)
else:
logger.debug("Unable to extract platform - using default platform: %s" % defaultPlatform)
return defaultPlatform
if not _after_bootstrap:
configLHCb = GangaCore.Utility.Config.makeConfig('LHCb', 'Parameters for LHCb')
# Set default values for the LHCb config section.
dscrpt = 'The name of the local site to be used for resolving LFNs into PFNs.'
configLHCb.addOption('LocalSite', '', dscrpt)
dscrpt = 'Files from these services will go to the output sandbox (unless \
overridden by the user in a specific job via the Job.outputfiles field). Files \
from all other known handlers will go to output data (unless overridden by \
the user in a specific job via the Job.outputfiles field).'
configLHCb.addOption('outputsandbox_types',
['CounterSummarySvc', 'NTupleSvc',
'HistogramPersistencySvc', 'MicroDSTStream',
'EvtTupleSvc'], dscrpt)
dscrpt = 'The string that is added after the filename in the options to tell' \
' Gaudi how to read the data. This is the default value used if the '\
'file name does not match any of the patterns in '\
'datatype_string_patterns.'
configLHCb.addOption('datatype_string_default',
"""TYP='POOL_ROOTTREE' OPT='READ'""", dscrpt)
dscrpt = 'If a file matches one of these patterns, then the string here '\
'overrides the datatype_string_default value.'
defval = {"SVC='LHCb::MDFSelector'": ['*.raw', '*.RAW', '*.mdf', '*.MDF']}
configLHCb.addOption('datatype_string_patterns', defval, dscrpt)
configLHCb.addOption('UserAddedApplications', "", "List of user added LHCb applications split by ':'")
configLHCb.addOption('SplitByFilesBackend', 'OfflineGangaDiracSplitter',
'Possible SplitByFiles backend algorithms to use to split jobs into subjobs,\
options are: GangaDiracSplitter, OfflineGangaDiracSplitter, \
splitInputDataBySize and splitInputData')
defaultLHCbDirac = 'prod'
configLHCb.addOption('LHCbDiracVersion', defaultLHCbDirac, 'set LHCbDirac version')
defaultPlatform = guessPlatform()
configLHCb.addOption('defaultPlatform', defaultPlatform, 'The default platform for applications to use')
def _store_root_version():
if 'ROOTSYS' in os.environ:
vstart = os.environ['ROOTSYS'].find('ROOT/') + 5
vend = os.environ['ROOTSYS'][vstart:].find('/')
rootversion = os.environ['ROOTSYS'][vstart:vstart + vend]
os.environ['ROOTVERSION'] = rootversion
else:
msg = 'Tried to setup ROOTVERSION environment variable but no ROOTSYS variable found.'
raise OptionValueError(msg)
if not _after_bootstrap:
store_dirac_environment()
# _store_root_version()
def standardSetup():
from . import PACKAGE
PACKAGE.standardSetup()
def loadPlugins(config=None):
logger.debug("Importing Backends")
from .Lib import Backends
logger.debug("Importing Applications")
from .Lib import Applications
logger.debug("Importing LHCbDataset")
from .Lib import LHCbDataset
logger.debug("Importing Mergers")
from .Lib import Mergers
logger.debug("Importing RTHandlers")
from .Lib import RTHandlers
logger.debug("Importing Splitters")
from .Lib import Splitters
logger.debug("Importing Tasks")
from .Lib import Tasks
logger.debug("Importing Files")
from .Lib import Files
logger.debug("Importing Checkers")
from .Lib import Checkers
logger.debug("Importing LHCbTasks")
from .Lib import Tasks
logger.debug("Finished Importing")
def postBootstrapHook():
configDirac = GangaCore.Utility.Config.getConfig('DIRAC')
configOutput = GangaCore.Utility.Config.getConfig('Output')
configPoll = GangaCore.Utility.Config.getConfig('PollThread')
configProxy = GangaCore.Utility.Config.getConfig('defaults_DiracProxy')
configDirac.setSessionValue('DiracEnvJSON', os.environ['GANGADIRACENVIRONMENT'])
configDirac.setSessionValue('userVO', 'lhcb')
configDirac.setSessionValue('allDiracSE', ['CERN-USER', 'CNAF-USER', 'GRIDKA-USER', 'RRCKI-USER',
'IN2P3-USER', 'SARA-USER', 'PIC-USER', 'RAL-USER'])
configDirac.setSessionValue('noInputDataBannedSites', [])
configDirac.setSessionValue('RequireDefaultSE', False)
configDirac.setSessionValue('proxyInitCmd', 'lhcb-proxy-init')
configDirac.setSessionValue('proxyInfoCmd', 'dirac-proxy-info')
configOutput.setSessionValue('FailJobIfNoOutputMatched', 'False')
configPoll.setSessionValue('autoCheckCredentials', False)
configProxy.setSessionValue('group', 'lhcb_user')
configProxy.setSessionValue('encodeDefaultProxyFileName', False)
# This is being dropped from 6.1.0 due to causing some bug in loading large numbers of jobs
#
# This will be nice to re-add once there is lazy loading support passed to the display for the 'jobs' command 09/2015 rcurrie
#
#from GangaCore.GPIDev.Lib.Registry.JobRegistry import config as display_config
#display_config.setSessionValue( 'jobs_columns', ('fqid', 'status', 'name', 'subjobs', 'application', 'backend', 'backend.actualCE', 'backend.extraInfo', 'comment') )
#display_config.setSessionValue( 'jobs_columns_functions', {'comment': 'lambda j: j.comment', 'backend.extraInfo': 'lambda j : j.backend.extraInfo ', 'subjobs': 'lambda j: len(j.subjobs)', 'backend.actualCE': 'lambda j:j.backend.actualCE', 'application': 'lambda j: j.application._name', 'backend': 'lambda j:j.backend._name'} )
#display_config.setSessionValue('jobs_columns_width', {'fqid': 8, 'status': 10, 'name': 10, 'application': 15, 'backend.extraInfo': 30, 'subjobs': 8, 'backend.actualCE': 17, 'comment': 20, 'backend': 15} )
from GangaCore.Core.GangaThread.WorkerThreads import getQueues
queue = getQueues()
if queue is not None:
queue.add(updateCreds)
else:
updateCreds()
def updateCreds():
try:
for group in ('lhcb_user', ):
if group == 'lhcb_user':
credential_store[DiracProxy(group=group, encodeDefaultProxyFileName=False)]
credential_store[DiracProxy(group=group)]
except KeyError:
pass
class gridProxy(object):
"""
    This is a stub class which wraps functions from the `credential_store` sentinel to familiar functions from Ganga 6.2 and prior
"""
@classmethod
def renew(cls):
"""
This method is similar to calling::
credential_store.create(DiracProxy())
or::
credential_store[DiracProxy()].renew()
as appropriate.
"""
from GangaCore.GPI import credential_store, DiracProxy
try:
cred = credential_store[DiracProxy()]
if not cred.is_valid():
cred.create()
except KeyError:
credential_store.create(DiracProxy())
@classmethod
def create(cls):
"""
This is a wrapper for::
credential_store.create(DiracProxy())
"""
cls.renew()
@classmethod
def destroy(cls):
"""
This is a wrapper for::
credential_store[DiracProxy()].destroy()
"""
from GangaCore.GPI import credential_store, DiracProxy
try:
cred = credential_store[DiracProxy()]
cred.destroy()
except KeyError:
pass
exportToGPI('gridProxy', gridProxy, 'Functions')
|
ganga-devs/ganga
|
ganga/GangaLHCb/__init__.py
|
Python
|
gpl-3.0
| 8,963
|
[
"DIRAC"
] |
b9a0407905ed4156a2d239f532912e7f396e1c0b3740f9964b50938999344ed5
|
import datetime
import pytest
import pytz
from events.models import (
Location, Time,
CustomEvent, KeynoteEvent, ProposedTalkEvent,
)
from proposals.models import AdditionalSpeaker
cst = pytz.timezone('Asia/Taipei')
class RendererTestUtils:
@staticmethod
def is_safe(s):
"""Check whether a string is safe.
This is Django's internal API, but we exploit it for easy testing.
"""
return not s or hasattr(s, '__html__')
@pytest.fixture
def utils():
return RendererTestUtils
@pytest.fixture
def keynote_belt_event(db, get_time):
return KeynoteEvent.objects.create(
speaker_name='Amber Brown',
slug='amber-brown',
begin_time=get_time('2016-06-05 9:00'),
end_time=get_time('2016-06-05 10:00'),
location=Location.ALL,
)
@pytest.fixture
def custom_partial_belt_event(db, get_time):
return CustomEvent.objects.create(
title='Job Fair',
begin_time=get_time('2016-06-04 14:45'),
end_time=get_time('2016-06-04 15:15'),
location=Location.R012,
)
@pytest.fixture
def proposed_talk_block_event(accepted_talk_proposal, another_user, get_time):
e = ProposedTalkEvent.objects.create(
proposal=accepted_talk_proposal,
begin_time=get_time('2016-06-03 16:00'),
end_time=get_time('2016-06-03 16:45'),
location=Location.R0,
)
AdditionalSpeaker.objects.create(
user=another_user, proposal=accepted_talk_proposal,
)
return e
@pytest.fixture
def events(
custom_partial_belt_event, keynote_belt_event,
proposed_talk_block_event, sponsored_block_event):
return {
'custom_event': custom_partial_belt_event,
'keynote_event': keynote_belt_event,
'proposed_talk_event': proposed_talk_block_event,
'sponsored_event': sponsored_block_event,
}
@pytest.fixture
def day():
return datetime.date(2016, 8, 19)
@pytest.fixture
def make_time(day):
def _make_time(h, m=0):
dt = datetime.datetime.combine(day, datetime.time(h, m))
return Time(value=cst.localize(dt))
return _make_time
@pytest.fixture
def belt_begin_time(make_time):
return make_time(15)
@pytest.fixture
def belt_end_time(make_time):
return make_time(16)
@pytest.fixture
def belt_event(belt_begin_time, belt_end_time):
return KeynoteEvent(
speaker_name='Amber Brown',
slug='amber-brown',
begin_time=belt_begin_time,
end_time=belt_end_time,
)
@pytest.fixture
def partial_belt_begin_time(make_time):
return make_time(1)
@pytest.fixture
def partial_belt_end_time(make_time):
return make_time(2)
@pytest.fixture
def partial_belt_events(partial_belt_begin_time, partial_belt_end_time):
event = CustomEvent(
title='Refreshment',
location=Location.R012,
begin_time=partial_belt_begin_time,
end_time=partial_belt_end_time,
)
return [event]
@pytest.fixture
def partial_belt_block_begin_time(make_time):
return make_time(3)
@pytest.fixture
def partial_belt_block_end_time(make_time):
return make_time(4)
@pytest.fixture
def partial_belt_block_events(
partial_belt_block_begin_time, partial_belt_block_end_time):
events = [
CustomEvent(
title='Refreshment',
location=Location.R012,
begin_time=partial_belt_block_begin_time,
end_time=partial_belt_block_end_time,
),
CustomEvent(
title='Free-market sub-orbital tattoo',
location=Location.R3,
begin_time=partial_belt_block_begin_time,
end_time=partial_belt_block_end_time,
),
]
return events
@pytest.fixture
def partial_block_begin_time(make_time):
return make_time(5)
@pytest.fixture
def partial_block_end_time(make_time):
return make_time(6)
@pytest.fixture
def partial_block_events(partial_block_begin_time, partial_block_end_time):
events = [
CustomEvent(
title='Boost Maintainability',
location=Location.R0,
begin_time=partial_block_begin_time,
end_time=partial_block_end_time,
),
CustomEvent(
title='We Made the PyCon TW 2016 Website',
location=Location.R1,
begin_time=partial_block_begin_time,
end_time=partial_block_end_time,
),
CustomEvent(
title='Deep Learning and Application in Python',
location=Location.R2,
begin_time=partial_block_begin_time,
end_time=partial_block_end_time,
),
]
return events
@pytest.fixture
def block_begin_time(make_time):
return make_time(7)
@pytest.fixture
def block_end_time(make_time):
return make_time(8)
@pytest.fixture
def block_events(block_begin_time, block_end_time):
events = [
CustomEvent(
title='Boost Maintainability',
location=Location.R0,
begin_time=block_begin_time,
end_time=block_end_time,
),
CustomEvent(
title='We Made the PyCon TW 2016 Website',
location=Location.R1,
begin_time=block_begin_time,
end_time=block_end_time,
),
CustomEvent(
title='Deep Learning and Application in Python',
location=Location.R2,
begin_time=block_begin_time,
end_time=block_end_time,
),
CustomEvent(
title='Free-market sub-orbital tattoo',
location=Location.R3,
begin_time=block_begin_time,
end_time=block_end_time,
),
]
return events
@pytest.fixture
def mismatch_block_begin_time(make_time):
return make_time(9)
@pytest.fixture
def mismatch_block_mid_time(make_time):
return make_time(10)
@pytest.fixture
def mismatch_block_end_time(make_time):
return make_time(11)
@pytest.fixture
def mismatch_block_events(
mismatch_block_begin_time, mismatch_block_mid_time,
mismatch_block_end_time):
events = [
CustomEvent(
title='Refreshment',
location=Location.R012,
begin_time=mismatch_block_begin_time,
end_time=mismatch_block_end_time,
),
CustomEvent(
title='Free-market sub-orbital tattoo',
location=Location.R3,
begin_time=mismatch_block_begin_time,
end_time=mismatch_block_mid_time,
),
]
return events
@pytest.fixture
def multirow_block_begin_time(make_time):
return make_time(12)
@pytest.fixture
def multirow_block_mid_time(make_time):
return make_time(13)
@pytest.fixture
def multirow_block_end_time(make_time):
return make_time(14)
@pytest.fixture
def multirow_block_events(
multirow_block_begin_time, multirow_block_mid_time,
multirow_block_end_time):
events = [
CustomEvent(
title='Boost Maintainability',
location=Location.R0,
begin_time=multirow_block_begin_time,
end_time=multirow_block_mid_time,
),
CustomEvent(
title='We Made the PyCon TW 2016 Website',
location=Location.R1,
begin_time=multirow_block_begin_time,
end_time=multirow_block_mid_time,
),
CustomEvent(
title='Deep Learning and Application in Python',
location=Location.R2,
begin_time=multirow_block_begin_time,
end_time=multirow_block_mid_time,
),
CustomEvent(
title='Free-market sub-orbital tattoo',
location=Location.R3,
begin_time=multirow_block_begin_time,
end_time=multirow_block_end_time,
),
CustomEvent(
title='Refreshment',
location=Location.R012,
begin_time=multirow_block_mid_time,
end_time=multirow_block_end_time,
),
]
return events
|
pycontw/pycontw2016
|
src/events/tests/renderers/conftest.py
|
Python
|
mit
| 8,032
|
[
"Amber"
] |
49ac2af6f3aefd0c160b09f177bf357af48b734df75391b8ff4505ce37191f46
|
#########################################################################
#
# detectors.py - This file is part of the Spectral Python (SPy)
# package.
#
# Copyright (C) 2012-2013 Thomas Boggs
#
# Spectral Python is free software; you can redistribute it and/
# or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# Spectral Python is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; if not, write to
#
# Free Software Foundation, Inc.
# 59 Temple Place, Suite 330
# Boston, MA 02111-1307
# USA
#
#########################################################################
#
# Send comments to:
# Thomas Boggs, tboggs@users.sourceforge.net
#
'''
Spectral target detection algorithms
'''
from __future__ import division, print_function, unicode_literals
__all__ = ['MatchedFilter', 'matched_filter', 'RX', 'rx', 'ace']
import numpy as np
from spectral.algorithms.transforms import LinearTransform
class MatchedFilter(LinearTransform):
r'''A callable linear matched filter.
Given target/background means and a common covariance matrix, the matched
filter response is given by:
.. math::
y=\frac{(\mu_t-\mu_b)^T\Sigma^{-1}(x-\mu_b)}{(\mu_t-\mu_b)^T\Sigma^{-1}(\mu_t-\mu_b)}
where :math:`\mu_t` is the target mean, :math:`\mu_b` is the background
mean, and :math:`\Sigma` is the covariance.
'''
def __init__(self, background, target):
'''Creates the filter, given background/target means and covariance.
Arguments:
`background` (`GaussianStats`):
The Gaussian statistics for the background (e.g., the result
of calling :func:`calc_stats`).
`target` (ndarray):
Length-K target mean
'''
from math import sqrt
from spectral.algorithms.transforms import LinearTransform
self.background = background
self.u_b = background.mean
self.u_t = target
self._whitening_transform = None
d_tb = (target - self.u_b)
self.d_tb = d_tb
C_1 = background.inv_cov
self.C_1 = C_1
# Normalization coefficient (inverse of squared Mahalanobis distance
# between u_t and u_b)
self.coef = 1.0 / d_tb.dot(C_1).dot(d_tb)
LinearTransform.__init__(
self, (self.coef * d_tb).dot(C_1), pre=-self.u_b)
def whiten(self, X):
'''Transforms data to the whitened space of the background.
Arguments:
`X` (ndarray):
Size (M,N,K) or (M*N,K) array of length K vectors to transform.
Returns an array of same size as `X` but linearly transformed to the
whitened space of the filter.
'''
import math
from spectral.algorithms.transforms import LinearTransform
from spectral.algorithms.spymath import matrix_sqrt
if self._whitening_transform is None:
A = math.sqrt(self.coef) * self.background.sqrt_inv_cov
self._whitening_transform = LinearTransform(A, pre=-self.u_b)
return self._whitening_transform(X)
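# Illustrative sketch (not part of the original module): the matched filter
# response can be computed directly from the formula in the class docstring.
# `bg_mean` and `bg_inv_cov` are assumed stand-ins for background.mean and
# background.inv_cov; `x` and `target` are length-K spectra.
def _matched_filter_score_sketch(x, target, bg_mean, bg_inv_cov):
    '''Return the scalar matched filter response for a single pixel `x`.'''
    d_tb = target - bg_mean                        # difference of means
    numerator = d_tb.dot(bg_inv_cov).dot(x - bg_mean)
    denominator = d_tb.dot(bg_inv_cov).dot(d_tb)   # squared Mahalanobis distance
    return numerator / denominator                 # 1.0 when x equals the target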
def matched_filter(X, target, background=None, window=None, cov=None):
r'''Computes a linear matched filter target detector score.
Usage:
y = matched_filter(X, target, background)
y = matched_filter(X, target, window=<win> [, cov=<cov>])
Given target/background means and a common covariance matrix, the matched
filter response is given by:
.. math::
y=\frac{(\mu_t-\mu_b)^T\Sigma^{-1}(x-\mu_b)}{(\mu_t-\mu_b)^T\Sigma^{-1}(\mu_t-\mu_b)}
where :math:`\mu_t` is the target mean, :math:`\mu_b` is the background
mean, and :math:`\Sigma` is the covariance.
Arguments:
`X` (numpy.ndarray):
For the first calling method shown, `X` can be an image with
shape (R, C, B) or an ndarray of shape (R * C, B). If the
`background` keyword is given, it will be used for the image
background statistics; otherwise, background statistics will be
computed from `X`.
If the `window` keyword is given, `X` must be a 3-dimensional
array and background statistics will be computed for each point
in the image using a local window defined by the keyword.
`target` (ndarray):
Length-K vector specifying the target to be detected.
`background` (`GaussianStats`):
The Gaussian statistics for the background (e.g., the result
of calling :func:`calc_stats` for an image). This argument is not
required if `window` is given.
`window` (2-tuple of odd integers):
Must have the form (`inner`, `outer`), where the two values
specify the widths (in pixels) of inner and outer windows centered
            about the pixel being evaluated. Both values must be odd integers.
The background mean and covariance will be estimated from pixels
in the outer window, excluding pixels within the inner window. For
example, if (`inner`, `outer`) = (5, 21), then the number of
pixels used to estimate background statistics will be
:math:`21^2 - 5^2 = 416`. If this argument is given, `background`
is not required (and will be ignored, if given).
The window is modified near image borders, where full, centered
windows cannot be created. The outer window will be shifted, as
needed, to ensure that the outer window still has height and width
`outer` (in this situation, the pixel being evaluated will not be
at the center of the outer window). The inner window will be
clipped, as needed, near image borders. For example, assume an
image with 145 rows and columns. If the window used is
(5, 21), then for the image pixel at (0, 0) (upper left corner),
            the inner window will cover `image[:3, :3]` and the outer
window will cover `image[:21, :21]`. For the pixel at (50, 1), the
inner window will cover `image[48:53, :4]` and the outer window
will cover `image[40:51, :21]`.
`cov` (ndarray):
An optional covariance to use. If this parameter is given, `cov`
will be used for all matched filter calculations (background
covariance will not be recomputed in each window). Only the
background mean will be recomputed in each window). If the
`window` argument is specified, providing `cov` will allow the
result to be computed *much* faster.
Returns numpy.ndarray:
        The return value will be the matched filter score for each
pixel given. If `X` has shape (R, C, K), the returned ndarray will
have shape (R, C).
'''
if background is not None and window is not None:
raise ValueError('`background` and `window` are mutually ' \
'exclusive arguments.')
if window is not None:
from .spatial import map_outer_window_stats
def mf_wrapper(bg, x):
return MatchedFilter(bg, target)(x)
return map_outer_window_stats(mf_wrapper, X, window[0], window[1],
dim_out=1, cov=cov)
else:
from spectral.algorithms.algorithms import calc_stats
if background is None:
background = calc_stats(X)
return MatchedFilter(background, target)(X)
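# Illustrative sketch (not part of the original module): typical calls to
# matched_filter(). `img` is an assumed (R, C, B) image array and `target`
# an assumed length-B spectrum; neither is defined in this file.
def _matched_filter_usage_sketch(img, target):
    '''Hypothetical usage with global and with local (windowed) statistics.'''
    from spectral.algorithms.algorithms import calc_stats
    bg = calc_stats(img)                                   # global background stats
    global_scores = matched_filter(img, target, bg)        # one score per pixel
    local_scores = matched_filter(img, target, window=(5, 21), cov=bg.cov)
    return global_scores, local_scores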
class RX():
r'''An implementation of the RX anomaly detector. Given the mean and
covariance of the background, this detector returns the squared Mahalanobis
distance of a spectrum according to
.. math::
y=(x-\mu_b)^T\Sigma^{-1}(x-\mu_b)
where `x` is the unknown pixel spectrum, :math:`\mu_b` is the background
mean, and :math:`\Sigma` is the background covariance.
References:
Reed, I.S. and Yu, X., "Adaptive multiple-band CFAR detection of an optical
pattern with unknown spectral distribution," IEEE Trans. Acoust.,
Speech, Signal Processing, vol. 38, pp. 1760-1770, Oct. 1990.
'''
dim_out=1
def __init__(self, background=None):
'''Creates the detector, given optional background/target stats.
Arguments:
`background` (`GaussianStats`, default None):
The Gaussian statistics for the background (e.g., the result
of calling :func:`calc_stats`). If no background stats are
provided, they will be estimated based on data passed to the
detector.
'''
from math import sqrt
if background is not None:
self.set_background(background)
else:
self.background = None
def set_background(self, stats):
'''Sets background statistics to be used when applying the detector.'''
self.background = stats
def __call__(self, X):
'''Applies the RX anomaly detector to X.
Arguments:
`X` (numpy.ndarray):
For an image with shape (R, C, B), `X` can be a vector of
length B (single pixel) or an ndarray of shape (R, C, B) or
(R * C, B).
Returns numpy.ndarray or float:
The return value will be the RX detector score (squared Mahalanobis
distance) for each pixel given. If `X` is a single pixel, a float
will be returned; otherwise, the return value will be an ndarray
of floats with one less dimension than the input.
'''
from spectral.algorithms.algorithms import calc_stats
if not isinstance(X, np.ndarray):
raise TypeError('Expected a numpy.ndarray.')
if self.background is None:
self.set_background(calc_stats(X))
X = (X - self.background.mean)
C_1 = self.background.inv_cov
ndim = X.ndim
shape = X.shape
if ndim == 1:
return X.dot(C_1).dot(X)
if ndim == 3:
X = X.reshape((-1, X.shape[-1]))
A = X.dot(C_1)
r = np.einsum('ij,ij->i', A, X)
return r.reshape(shape[:-1])
# I tried using einsum for the above calculations but, surprisingly,
# it was *much* slower than using dot & sum. Need to figure out if
# that is due to multithreading or some other reason.
# print 'ndim =', ndim
# if ndim == 1:
# return np.einsum('i,ij,j', X, self.background.inv_cov, X)
# if ndim == 3:
# return np.einsum('ijk,km,ijm->ij',
# X, self.background.inv_cov, X).squeeze()
# elif ndim == 2:
# return np.einsum('ik,km,im->i',
# X, self.background.inv_cov, X).squeeze()
# else:
# raise Exception('Unexpected number of dimensions.')
#
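# Illustrative sketch (not part of the original module): for a single pixel the
# RX score is just the squared Mahalanobis distance from the background mean,
# which is what the vectorized einsum path above computes for whole images.
def _rx_score_sketch(x, bg_mean, bg_inv_cov):
    '''Return the RX anomaly score for one length-B pixel `x` (assumed inputs).'''
    d = x - bg_mean
    return d.dot(bg_inv_cov).dot(d)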
def rx(X, background=None, window=None, cov=None):
r'''Computes RX anomaly detector scores.
Usage:
y = rx(X [, background=bg])
y = rx(X, window=(inner, outer) [, cov=C])
The RX anomaly detector produces a detection statistic equal to the
squared Mahalanobis distance of a spectrum from a background distribution
according to
.. math::
y=(x-\mu_b)^T\Sigma^{-1}(x-\mu_b)
where `x` is the pixel spectrum, :math:`\mu_b` is the background
mean, and :math:`\Sigma` is the background covariance.
Arguments:
`X` (numpy.ndarray):
For the first calling method shown, `X` can be an image with
shape (R, C, B) or an ndarray of shape (R * C, B). If the
`background` keyword is given, it will be used for the image
background statistics; otherwise, background statistics will be
computed from `X`.
If the `window` keyword is given, `X` must be a 3-dimensional
array and background statistics will be computed for each point
in the image using a local window defined by the keyword.
`background` (`GaussianStats`):
The Gaussian statistics for the background (e.g., the result
of calling :func:`calc_stats`). If no background stats are
provided, they will be estimated based on data passed to the
detector.
`window` (2-tuple of odd integers):
Must have the form (`inner`, `outer`), where the two values
specify the widths (in pixels) of inner and outer windows centered
            about the pixel being evaluated. Both values must be odd integers.
The background mean and covariance will be estimated from pixels
in the outer window, excluding pixels within the inner window. For
example, if (`inner`, `outer`) = (5, 21), then the number of
pixels used to estimate background statistics will be
:math:`21^2 - 5^2 = 416`.
            The window is modified near image borders, where full, centered
windows cannot be created. The outer window will be shifted, as
needed, to ensure that the outer window still has height and width
`outer` (in this situation, the pixel being evaluated will not be
at the center of the outer window). The inner window will be
clipped, as needed, near image borders. For example, assume an
image with 145 rows and columns. If the window used is
(5, 21), then for the image pixel at (0, 0) (upper left corner),
            the inner window will cover `image[:3, :3]` and the outer
window will cover `image[:21, :21]`. For the pixel at (50, 1), the
inner window will cover `image[48:53, :4]` and the outer window
will cover `image[40:51, :21]`.
`cov` (ndarray):
An optional covariance to use. If this parameter is given, `cov`
will be used for all RX calculations (background covariance
will not be recomputed in each window). Only the background
            mean will be recomputed in each window.
Returns numpy.ndarray:
The return value will be the RX detector score (squared Mahalanobis
distance) for each pixel given. If `X` has shape (R, C, B), the
        returned ndarray will have shape (R, C).
References:
Reed, I.S. and Yu, X., "Adaptive multiple-band CFAR detection of an optical
pattern with unknown spectral distribution," IEEE Trans. Acoust.,
Speech, Signal Processing, vol. 38, pp. 1760-1770, Oct. 1990.
'''
if background is not None and window is not None:
raise ValueError('`background` and `window` keywords are mutually ' \
'exclusive.')
if window is not None:
from .spatial import map_outer_window_stats
rx = RX()
def rx_wrapper(bg, x):
rx.set_background(bg)
return rx(x)
return map_outer_window_stats(rx_wrapper, X, window[0], window[1],
dim_out=1, cov=cov)
else:
return RX(background)(X)
class ACE():
r'''Adaptive Coherence/Cosine Estimator (ACE).
'''
def __init__(self, target, background=None, **kwargs):
'''Creates the callable detector for target and background.
Arguments:
`target` (ndarray or sequence of ndarray):
Can be either:
A length-B ndarray. In this case, `target` specifies a single
target spectrum to be detected. The return value will be an
ndarray with shape (R, C).
An ndarray with shape (D, B). In this case, `target` contains
`D` length-B targets that define a subspace for the detector.
The return value will be an ndarray with shape (R, C).
`background` (`GaussianStats`):
The Gaussian statistics for the background (e.g., the result
of calling :func:`calc_stats`). If no background stats are
provided, they will be estimated based on data passed to the
detector.
Keyword Arguments:
`vectorize` (bool, default True):
Specifies whether the __call__ method should attempt to vectorize
                operations. This typically results in faster computation but will
consume more memory.
'''
for k in kwargs:
            if k not in ('vectorize',):
raise ValueError('Invalid keyword: {0}'.format(k))
self.vectorize = kwargs.get('vectorize', True)
self._target = None
self._background = None
self.set_target(target)
if background is not None:
self.set_background(background)
else:
self._background = None
def set_target(self, target):
'''Specifies target or target subspace used by the detector.
Arguments:
`target` (ndarray or sequence of ndarray):
Can be either:
A length-B ndarray. In this case, `target` specifies a single
target spectrum to be detected. The return value will be an
ndarray with shape (R, C).
An ndarray with shape (D, B). In this case, `target` contains
`D` length-B targets that define a subspace for the detector.
The return value will be an ndarray with shape (R, C).
'''
if target is None:
self._target = None
else:
self._target = np.array(target, ndmin=2)
self._update_constants()
def set_background(self, stats):
'''Sets background statistics to be used when applying the detector.
Arguments:
`stats` (`GaussianStats`):
The Gaussian statistics for the background (e.g., the result
of calling :func:`calc_stats`). If no background stats are
provided, they will be estimated based on data passed to the
detector.
'''
self._background = stats
self._update_constants()
def _update_constants(self):
'''Computes and caches constants used when applying the detector.'''
if self._background is not None and self._target is not None:
if self._background.mean is not None:
target = (self._target - self._background.mean).T
else:
target = self._target.T
self._S = self._background.sqrt_inv_cov.dot(target)
self._P = self._S.dot(np.linalg.pinv(self._S))
else:
            self._S = None
self._P = None
def __call__(self, X):
'''Compute ACE detector scores for X.
Arguments:
`X` (numpy.ndarray):
For an image with shape (R, C, B), `X` can be a vector of
length B (single pixel) or an ndarray of shape (R, C, B) or
(R * C, B).
Returns numpy.ndarray or float:
The return value will be the RX detector score (squared Mahalanobis
distance) for each pixel given. If `X` is a single pixel, a float
will be returned; otherwise, the return value will be an ndarray
of floats with one less dimension than the input.
'''
from spectral.algorithms.algorithms import calc_stats
if not isinstance(X, np.ndarray):
raise TypeError('Expected a numpy.ndarray.')
shape = X.shape
if X.ndim == 1:
# Compute ACE score for single pixel
if self._background.mean is not None:
X = X - self._background.mean
z = self._background.sqrt_inv_cov.dot(X)
return z.dot(self._P).dot(z) / (z.dot(z))
if self._background is None:
self.set_background(calc_stats(X))
if self.vectorize:
# Compute all scores at once
if self._background.mean is not None:
X = X - self._background.mean
if X.ndim == 3:
X = X.reshape((-1, X.shape[-1]))
z = self._background.sqrt_inv_cov.dot(X.T).T
zP = np.dot(z, self._P)
zPz = np.einsum('ij,ij->i', zP, z)
zz = np.einsum('ij,ij->i', z, z)
return (zPz / zz).reshape(shape[:-1])
else:
# Call recursively for each pixel
return np.apply_along_axis(self, -1, X)
def ace(X, target, background=None, window=None, cov=None, **kwargs):
r'''Returns Adaptive Coherence/Cosine Estimator (ACE) detection scores.
Usage:
y = ace(X, target, background)
y = ace(X, target, window=<win> [, cov=<cov>])
Arguments:
`X` (numpy.ndarray):
For the first calling method shown, `X` can be an ndarray with
shape (R, C, B) or an ndarray of shape (R * C, B). If the
`background` keyword is given, it will be used for the image
background statistics; otherwise, background statistics will be
computed from `X`.
If the `window` keyword is given, `X` must be a 3-dimensional
array and background statistics will be computed for each point
in the image using a local window defined by the keyword.
`target` (ndarray or sequence of ndarray):
If `X` has shape (R, C, B), `target` can be any of the following:
A length-B ndarray. In this case, `target` specifies a single
target spectrum to be detected. The return value will be an
ndarray with shape (R, C).
An ndarray with shape (D, B). In this case, `target` contains
`D` length-B targets that define a subspace for the detector.
The return value will be an ndarray with shape (R, C).
A length-D sequence (e.g., list or tuple) of length-B ndarrays.
            In this case, the detector will be applied separately to each of
the `D` targets. This is equivalent to calling the function
sequentially for each target and stacking the results but is
much faster. The return value will be an ndarray with shape
(R, C, D).
`background` (`GaussianStats`):
The Gaussian statistics for the background (e.g., the result
of calling :func:`calc_stats` for an image). This argument is not
required if `window` is given.
`window` (2-tuple of odd integers):
Must have the form (`inner`, `outer`), where the two values
specify the widths (in pixels) of inner and outer windows centered
            about the pixel being evaluated. Both values must be odd integers.
The background mean and covariance will be estimated from pixels
in the outer window, excluding pixels within the inner window. For
example, if (`inner`, `outer`) = (5, 21), then the number of
pixels used to estimate background statistics will be
:math:`21^2 - 5^2 = 416`. If this argument is given, `background`
is not required (and will be ignored, if given).
The window is modified near image borders, where full, centered
windows cannot be created. The outer window will be shifted, as
needed, to ensure that the outer window still has height and width
`outer` (in this situation, the pixel being evaluated will not be
at the center of the outer window). The inner window will be
clipped, as needed, near image borders. For example, assume an
image with 145 rows and columns. If the window used is
(5, 21), then for the image pixel at (0, 0) (upper left corner),
            the inner window will cover `image[:3, :3]` and the outer
window will cover `image[:21, :21]`. For the pixel at (50, 1), the
inner window will cover `image[48:53, :4]` and the outer window
will cover `image[40:51, :21]`.
`cov` (ndarray):
An optional covariance to use. If this parameter is given, `cov`
will be used for all matched filter calculations (background
covariance will not be recomputed in each window). Only the
            background mean will be recomputed in each window. If the
`window` argument is specified, providing `cov` will allow the
result to be computed *much* faster.
Keyword Arguments:
`vectorize` (bool, default True):
Specifies whether the function should attempt to vectorize
            operations. This typically results in faster computation but will
consume more memory.
Returns numpy.ndarray:
The return value will be the ACE scores for each input pixel. The shape
of the returned array will be either (R, C) or (R, C, D), depending on
the value of the `target` argument.
References:
Kraut S. & Scharf L.L., "The CFAR Adaptive Subspace Detector is a Scale-
Invariant GLRT," IEEE Trans. Signal Processing., vol. 47 no. 9, pp. 2538-41,
Sep. 1999
'''
import spectral as spy
if background is not None and window is not None:
raise ValueError('`background` and `window` keywords are mutually ' \
'exclusive.')
detector = ACE(target, background, **kwargs)
if window is None:
# Use common background statistics for all pixels
if isinstance(target, np.ndarray):
# Single detector score for target subspace for each pixel
result = detector(X)
else:
# Separate score arrays for each target in target list
if background is None:
detector.set_background(spy.calc_stats(X))
def apply_to_target(t):
detector.set_target(t)
return detector(X)
result = np.array([apply_to_target(t) for t in target])
if result.ndim == 3:
result = result.transpose(1, 2, 0)
else:
# Compute local background statistics for each pixel
from spectral.algorithms.spatial import map_outer_window_stats
if isinstance(target, np.ndarray):
# Single detector score for target subspace for each pixel
def ace_wrapper(bg, x):
detector.set_background(bg)
return detector(x)
result = map_outer_window_stats(ace_wrapper, X, window[0], window[1],
dim_out=1, cov=cov)
else:
# Separate score arrays for each target in target list
def apply_to_target(t, x):
detector.set_target(t)
return detector(x)
def ace_wrapper(bg, x):
detector.set_background(bg)
return [apply_to_target(t, x) for t in target]
result = map_outer_window_stats(ace_wrapper, X, window[0], window[1],
dim_out=len(target), cov=cov)
if result.ndim == 3:
result = result.transpose(1, 2, 0)
# Convert NaN values to zero
result = np.nan_to_num(result)
if isinstance(result, np.ndarray):
return np.clip(result, 0, 1, out=result)
else:
return np.clip(result, 0, 1)
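# Illustrative sketch (not part of the original module): typical calls to ace().
# `img` is an assumed (R, C, B) image and `targets` an assumed list of
# length-B target spectra; neither is defined in this file.
def _ace_usage_sketch(img, targets):
    '''Hypothetical usage for a single target and for a list of targets.'''
    import spectral as spy
    bg = spy.calc_stats(img)
    single = ace(img, targets[0], background=bg)       # returns shape (R, C)
    stacked = ace(img, targets, background=bg)         # returns shape (R, C, len(targets))
    local = ace(img, targets[0], window=(5, 21), cov=bg.cov)
    return single, stacked, local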
|
ohspite/spectral
|
spectral/algorithms/detectors.py
|
Python
|
gpl-2.0
| 28,119
|
[
"Gaussian"
] |
574c3f225b5b3728cc1b315f4019b2b9b8b85d48e831fc2d6a671446f90b4b91
|
#!/usr/bin/env python3
# Tool to discover 'smells' in the Discogs data via the API. It downloads
# release data and flags releases that need to be fixed.
#
# The checks are (nearly) identical to cleanup-discogs.py
#
# The results that are printed by this script are by no means complete
# or accurate.
#
# Licensed under the terms of the General Public License version 3
#
# SPDX-License-Identifier: GPL-3.0-only
#
# Copyright 2017 - 2019 - Armijn Hemel for Tjaldur Software Governance Solutions
import sys
import os
import re
import datetime
import time
import json
import subprocess
import argparse
import configparser
import tempfile
import requests
import discogssmells
# grab the current year. Make sure to set the clock of your machine
# to the correct date or use NTP!
currentyear = datetime.datetime.utcnow().year
# grab the latest release from the API. Results tend to get cached
# by the Discogs nginx instance for some reason.
def get_latest_release(headers):
latest = 'https://api.discogs.com/database/search?type=release&sort=date_added'
r = requests.get(latest, headers=headers)
if r.status_code != 200:
return
# now parse the response
responsejson = r.json()
if not 'results' in responsejson:
return
return responsejson['results'][0]['id']
# convenience method to check if roles are valid
def checkrole(artist, release_id, credits):
invalidroles = []
if not '[' in artist['role']:
roles = map(lambda x: x.strip(), artist['role'].split(','))
for role in roles:
if role == '':
continue
if not role in credits:
invalidroles.append(role)
else:
# sometimes there is an additional description in the role in
# between [ and ]
# This method is definitely not catching everything.
rolesplit = artist['role'].split('[')
for rs in rolesplit:
if ']' in rs:
rs_tmp = rs
while ']' in rs_tmp:
rs_tmp = rs_tmp.split(']', 1)[1]
roles = map(lambda x: x.strip(), rs_tmp.split(','))
for role in roles:
if role == '':
continue
# ugly hack because sometimes the extra data between [ and ]
# appears halfway the words in a role, sigh.
if role == 'By':
continue
if not role in credits:
invalidroles.append(role)
return invalidroles
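# Illustrative sketch (not part of the original script): how checkrole() treats
# plain and bracketed role strings. The credit set below is a made-up subset of
# the valid Discogs credits that the real script loads elsewhere.
def _example_checkrole():
    credits = {'Producer', 'Mixed By', 'Written-By'}
    # comma separated roles are validated one by one
    assert checkrole({'role': 'Producer, Mastered By'}, 12345, credits) == ['Mastered By']
    # with a '[...]' description only the text after the closing bracket is
    # validated; note that the part before the '[' is skipped by this heuristic
    assert checkrole({'role': 'Mixed By [Additional], Written-By'}, 12345, credits) == []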
# process the contents of a release
def processrelease(release, config_settings, count, credits, ibuddy, favourites):
releaseurl = 'https://www.discogs.com/release/%s'
# only process entries that have a status of 'Accepted'
if release['status'] == 'Rejected':
return count
elif release['status'] == 'Draft':
return count
elif release['status'] == 'Deleted':
return count
errormsgs = []
# store some data that is used by multiple checks
founddeposito = False
year = None
release_id = release['id']
# check for favourite artist, if defined
for artist in release['artists']:
if artist['name'] in favourites:
if ibuddy != None:
ibuddy.executecommand('HEART:WINGSHIGH:RED:GO:SHORTSLEEP:NOHEART:WINGSLOW:GO:SHORTSLEEP:HEART:LEFT::WINGSHIGH::GO:SHORTSLEEP:NOHEART:RIGHT:GO:HEART:GO:BLUE:SHORTSLEEP:WINGSLOW:GO:SHORTSLEEP:RESET')
ibuddy.reset()
if config_settings['use_notify_send']:
count += 1
errormsgs.append('%8d -- Favourite Artist (%s): https://www.discogs.com/release/%s' % (count, artist['name'], str(release_id)))
# check for misspellings of Czechoslovak and Czech releases
# People use 0x115 instead of 0x11B, which look very similar but 0x115
# is not valid in the Czech alphabet. Check for all data except
# the YouTube playlist.
# https://www.discogs.com/group/thread/757556
# This is important for the following elements:
# * tracklist (title, subtracks not supported yet)
# * artist and extraartists (including extraartists in tracklist)
# * notes
# * BaOI identifiers (both value and description)
if config_settings['check_spelling_cs']:
if 'country' in release:
if release['country'] == 'Czechoslovakia' or release['country'] == 'Czech Republic':
for t in release['tracklist']:
if chr(0x115) in t['title']:
count += 1
errormsgs.append('%8d -- Czech character (0x115, tracklist: %s): https://www.discogs.com/release/%s' % (count, t['position'], str(release_id)))
if 'extraartists' in t:
for artist in t['extraartists']:
if chr(0x115) in artist['name']:
count += 1
errormsgs.append('%8d -- Czech character (0x115, artist name at: %s): https://www.discogs.com/release/%s' % (count, t['position'], str(release_id)))
if 'artists' in release:
for artist in release['artists']:
if chr(0x115) in artist['name']:
count += 1
errormsgs.append('%8d -- Czech character (0x115, artist name: %s): https://www.discogs.com/release/%s' % (count, artist['name'], str(release_id)))
if 'extraartists' in release:
for artist in release['extraartists']:
if chr(0x115) in artist['name']:
count += 1
errormsgs.append('%8d -- Czech character (0x115, artist name: %s): https://www.discogs.com/release/%s' % (count, artist['name'], str(release_id)))
for i in release['identifiers']:
if chr(0x115) in i['value']:
count += 1
errormsgs.append('%8d -- Czech character (0x115, BaOI): https://www.discogs.com/release/%s' % (count, str(release_id)))
if 'description' in i:
if chr(0x115) in i['description']:
count += 1
errormsgs.append('%8d -- Czech character (0x115, BaOI): https://www.discogs.com/release/%s' % (count, str(release_id)))
if 'notes' in release:
if chr(0x115) in release['notes']:
count += 1
errormsgs.append('%8d -- Czech character (0x115, Notes): https://www.discogs.com/release/%s' % (count, str(release_id)))
# check credit roles in three places:
# 1. artists
# 2. extraartists (release level)
# 3. extraartists (track level)
if 'check_credits' in config_settings:
if config_settings['check_credits']:
if 'artists' in release:
for artist in release['artists']:
if 'role' in artist:
invalidroles = checkrole(artist, release_id, credits)
for role in invalidroles:
count += 1
errormsgs.append('%8d -- Role \'%s\' invalid: https://www.discogs.com/release/%s' % (count, role, str(release_id)))
if 'extraartists' in release:
for artist in release['extraartists']:
if 'role' in artist:
invalidroles = checkrole(artist, release_id, credits)
for role in invalidroles:
count += 1
errormsgs.append('%8d -- Role \'%s\' invalid: https://www.discogs.com/release/%s' % (count, role, str(release_id)))
for t in release['tracklist']:
if 'extraartists' in t:
for artist in t['extraartists']:
if 'role' in artist:
invalidroles = checkrole(artist, release_id, credits)
for role in invalidroles:
count += 1
errormsgs.append('%8d -- Role \'%s\' invalid: https://www.discogs.com/release/%s' % (count, role, str(release_id)))
# check release month and year
if 'released' in release:
if config_settings['check_month']:
if '-' in release['released']:
monthres = re.search('-(\d+)-', release['released'])
if monthres != None:
monthnr = int(monthres.groups()[0])
if monthnr == 0:
count += 1
errormsgs.append('%8d -- Month 00: https://www.discogs.com/release/%s' % (count, str(release_id)))
elif monthnr > 12:
count += 1
errormsgs.append('%8d -- Month impossible (%d): https://www.discogs.com/release/%s' % (count, monthnr, str(release_id)))
try:
year = int(release['released'].split('-', 1)[0])
# TODO: check for implausible old years
except ValueError:
if config_settings['check_year']:
count += 1
errormsgs.append('%8d -- Year \'%s\' invalid: https://www.discogs.com/release/%s' % (count, release['released'], str(release_id)))
# check the tracklist
tracklistcorrect = True
tracklistpositions = set()
formattexts = set()
if config_settings['check_tracklisting'] and len(release['formats']) == 1:
formattext = release['formats'][0]['name']
formattexts.add(formattext)
formatqty = int(release['formats'][0]['qty'])
for t in release['tracklist']:
if tracklistcorrect:
if formattext in ['Vinyl', 'Cassette', 'Shellac', '8-Track Cartridge']:
try:
int(t['position'])
count += 1
errormsgs.append('%8d -- Tracklisting (%s): https://www.discogs.com/release/%s' % (count, formattext, str(release_id)))
tracklistcorrect = False
break
except:
pass
if formatqty == 1:
if t['position'].strip() != '' and t['position'].strip() != '-' and t['type_'] != 'heading' and t['position'] in tracklistpositions:
count += 1
errormsgs.append('%8d -- Tracklisting reuse (%s, %s): https://www.discogs.com/release/%s' % (count, formattext, t['position'], str(release_id)))
tracklistpositions.add(t['position'])
# various checks for labels
for l in release['labels']:
# check for several identifiers being used as catalog numbers
if 'catno' in l:
if config_settings['check_label_code']:
if l['catno'].lower().startswith('lc'):
falsepositive = False
# American releases on Epic (label 1005 in Discogs)
# sometimes start with LC
if l['id'] == 1005:
falsepositive = True
if not falsepositive:
if discogssmells.labelcodere.match(l['catno'].lower()) != None:
count += 1
errormsgs.append('%8d -- Possible Label Code (in Catalogue Number): https://www.discogs.com/release/%s' % (count, str(release_id)))
if config_settings['check_deposito']:
# now check for D.L.
dlfound = False
for d in discogssmells.depositores:
result = d.search(l['catno'])
if result != None:
for depositovalre in discogssmells.depositovalres:
if depositovalre.search(l['catno']) != None:
dlfound = True
break
if dlfound:
count += 1
errormsgs.append('%8d -- Possible Depósito Legal (in Catalogue Number): https://www.discogs.com/release/%s' % (count, str(release_id)))
if 'name' in l:
if config_settings['check_label_name']:
if l['name'] == 'London' and l['id'] == 26905:
count += 1
errormsgs.append('%8d -- Wrong label (London): https://www.discogs.com/release/%s' % (count, str(release_id)))
'''
if name == 'format':
for (k,v) in attrs.items():
if k == 'name':
if v == 'CD':
self.iscd = True
self.formattexts.add(v)
elif k == 'qty':
if self.formatmaxqty == 0:
self.formatmaxqty = max(self.formatmaxqty, int(v))
else:
self.formatmaxqty += int(v)
'''
# various checks for the formats
formattexts = set()
for f in release['formats']:
if 'descriptions' in f:
if 'Styrene' in f['descriptions']:
pass
# store the names of the formats. This is useful later for SID code checks
if 'name' in f:
formattexts.add(f['name'])
if 'text' in f:
if f['text'] != '':
if config_settings['check_spars_code']:
tmpspars = f['text'].lower().strip()
for s in ['.', ' ', '•', '·', '[', ']', '-', '|', '/']:
tmpspars = tmpspars.replace(s, '')
if tmpspars in discogssmells.validsparscodes:
count += 1
errormsgs.append('%8d -- Possible SPARS Code (in Format): https://www.discogs.com/release/%s' % (count, str(release_id)))
if config_settings['check_label_code']:
if f['text'].lower().startswith('lc'):
if discogssmells.labelcodere.match(f['text'].lower()) != None:
count += 1
errormsgs.append('%8d -- Possible Label Code (in Format): https://www.discogs.com/release/%s' % (count, str(release_id)))
# walk through the BaOI identifiers
for identifier in release['identifiers']:
v = identifier['value']
if config_settings['check_creative_commons']:
if 'creative commons' in v.lower():
count += 1
                errormsgs.append('%8d -- Creative Commons reference: https://www.discogs.com/release/%s' % (count, str(release_id)))
if 'description' in identifier:
if 'creative commons' in identifier['description'].lower():
count += 1
                    errormsgs.append('%8d -- Creative Commons reference: https://www.discogs.com/release/%s' % (count, str(release_id)))
if config_settings['check_spars_code']:
if identifier['type'] == 'SPARS Code':
if v.lower() != "none":
# Sony format codes
# https://www.discogs.com/forum/thread/339244
# https://www.discogs.com/forum/thread/358285
if v == 'CDC' or v == 'CDM':
count += 1
errormsgs.append('%8d -- Sony Format Code in SPARS: https://www.discogs.com/release/%s' % (count, str(release_id)))
else:
tmpspars = v.lower().strip()
for s in ['.', ' ', '•', '·', '[', ']', '-', '|', '/']:
tmpspars = tmpspars.replace(s, '')
if not tmpspars in discogssmells.validsparscodes:
count += 1
errormsgs.append('%8d -- SPARS Code (format): https://www.discogs.com/release/%s' % (count, str(release_id)))
else:
# first check the description free text field
sparsfound = False
if 'description' in identifier:
for spars in discogssmells.spars_ftf:
if spars in identifier['description'].lower():
sparsfound = True
# then also check the value to see if there is a valid SPARS
if v.lower() in discogssmells.validsparscodes:
sparsfound = True
else:
if 'd' in v.lower():
tmpspars = v.strip()
for s in ['.', ' ', '•', '·', '[', ']', '-', '|', '/']:
tmpspars = tmpspars.replace(s, '')
if tmpspars in discogssmells.validsparscodes:
sparsfound = True
# print error if some SPARS code reference was found
if sparsfound:
count += 1
errormsgs.append('%8d -- SPARS Code (BaOI): https://www.discogs.com/release/%s' % (count, str(release_id)))
if config_settings['check_label_code']:
if identifier['type'] == 'Label Code':
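                # A Label Code is 'LC' followed by a 4 or 5 digit number (originally
                # assigned by the German GVL), for example 'LC 01234'.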
# check how many people use 'O' instead of '0'
if v.lower().startswith('lc'):
if 'O' in identifier['value']:
                        count += 1
                        errormsgs.append('%8d -- Spelling error in Label Code: https://www.discogs.com/release/%s' % (count, str(release_id)))
sys.stdout.flush()
if discogssmells.labelcodere.match(v.lower()) is None:
count += 1
errormsgs.append('%8d -- Label Code (value): https://www.discogs.com/release/%s' % (count, str(release_id)))
else:
if identifier['type'] == 'Rights Society':
if v.lower().startswith('lc'):
if discogssmells.labelcodere.match(v.lower()) != None:
count += 1
errormsgs.append('%8d -- Label Code (in Rights Society): https://www.discogs.com/release/%s' % (count, str(release_id)))
elif identifier['type'] == 'Barcode':
if v.lower().startswith('lc'):
if discogssmells.labelcodere.match(v.lower()) != None:
count += 1
errormsgs.append('%8d -- Label Code (in Barcode): https://www.discogs.com/release/%s' % (count, str(release_id)))
else:
if 'description' in identifier:
if identifier['description'].lower() in discogssmells.label_code_ftf:
count += 1
errormsgs.append('%8d -- Label Code: https://www.discogs.com/release/%s' % (count, str(release_id)))
if config_settings['check_rights_society']:
if identifier['type'] != 'Rights Society':
foundrightssociety = False
for r in discogssmells.rights_societies:
if v.replace('.', '') == r or v.replace(' ', '') == r:
count += 1
foundrightssociety = True
if identifier['type'] == 'Barcode':
errormsgs.append('%8d -- Rights Society (Barcode): https://www.discogs.com/release/%s' % (count, str(release_id)))
else:
errormsgs.append('%8d -- Rights Society (BaOI): https://www.discogs.com/release/%s' % (count, str(release_id)))
break
if not foundrightssociety and 'description' in identifier:
if identifier['description'].lower() in discogssmells.rights_societies_ftf:
count += 1
errormsgs.append('%8d -- Rights Society: https://www.discogs.com/release/%s' % (count, str(release_id)))
# temporary hack, move to own configuration option
asinstrict = False
if config_settings['check_asin']:
if identifier['type'] == 'ASIN':
if not asinstrict:
tmpasin = v.strip().replace('-', '')
else:
tmpasin = v
if not len(tmpasin.split(':')[-1].strip()) == 10:
count += 1
errormsgs.append('%8d -- ASIN (wrong length): https://www.discogs.com/release/%s' % (count, str(release_id)))
else:
if 'description' in identifier:
if identifier['description'].lower().startswith('asin'):
count += 1
errormsgs.append('%8d -- ASIN (BaOI): https://www.discogs.com/release/%s' % (count, str(release_id)))
if config_settings['check_isrc']:
if identifier['type'] == 'ISRC':
# Check the length of ISRC fields. According to the
# specifications these should be 12 in length. Some ISRC
# identifiers that have been recorded in the database
# span a range of tracks. These will be reported as wrong ISRC
# codes. It is unclear what needs to be done with those.
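                # A well formed ISRC consists of a two letter country code, a three
                # character registrant code, two digits for the year and a five digit
                # designation code, for example 'US-RC1-76-07839', which is twelve
                # characters once the separators have been removed.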
# first get rid of cruft
isrc_tmp = v.strip().upper()
if isrc_tmp.startswith('ISRC'):
isrc_tmp = isrc_tmp.split('ISRC')[-1].strip()
if isrc_tmp.startswith('CODE'):
isrc_tmp = isrc_tmp.split('CODE')[-1].strip()
# replace a few characters
isrc_tmp = isrc_tmp.replace('-', '')
isrc_tmp = isrc_tmp.replace(' ', '')
isrc_tmp = isrc_tmp.replace('.', '')
isrc_tmp = isrc_tmp.replace(':', '')
isrc_tmp = isrc_tmp.replace('–', '')
if not len(isrc_tmp) == 12:
count += 1
errormsgs.append('%8d -- ISRC (wrong length): https://www.discogs.com/release/%s' % (count, str(release_id)))
else:
if 'description' in identifier:
if identifier['description'].lower().startswith('isrc'):
count += 1
errormsgs.append('%8d -- ISRC Code (BaOI): https://www.discogs.com/release/%s' % (count, str(release_id)))
elif identifier['description'].lower().startswith('issrc'):
count += 1
errormsgs.append('%8d -- ISRC Code (BaOI): https://www.discogs.com/release/%s' % (count, str(release_id)))
else:
for isrc in discogssmells.isrc_ftf:
if isrc in identifier['description'].lower():
count += 1
errormsgs.append('%8d -- ISRC Code (BaOI): https://www.discogs.com/release/%s' % (count, str(release_id)))
if identifier['type'] == 'Barcode':
pass
# check depósito legal in BaOI
if config_settings['check_deposito']:
if 'country' in release:
if release['country'] == 'Spain':
if identifier['type'] == 'Depósito Legal':
founddeposito = True
if v.strip().endswith('.'):
count += 1
errormsgs.append('%8d -- Depósito Legal (formatting): https://www.discogs.com/release/%s' % (count, str(release_id)))
if year != None:
# now try to find the year
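                            # Spanish Depósito Legal values typically look like
                            # 'B-12345-2001' or 'M 12345-2001': a provincial prefix,
                            # a serial number and the year as the last component.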
depositoyear = None
if v.strip().endswith('℗'):
count += 1
errormsgs.append('%8d -- Depósito Legal (formatting, has ℗): https://www.discogs.com/release/%s' % (count, str(release_id)))
                                # ugly hack: remove ℗ so that at least some sort of check can still be done
v = v.strip().rsplit('℗', 1)[0]
# several separators, including some Unicode ones
for sep in ['-', '–', '/', '.', ' ', '\'', '_']:
try:
depositoyeartext = v.strip().rsplit(sep, 1)[-1]
if sep == '.' and len(depositoyeartext) == 3:
continue
if '.' in depositoyeartext:
depositoyeartext = depositoyeartext.replace('.', '')
depositoyear = int(depositoyeartext)
if depositoyear < 100:
# correct the year. This won't work correctly after 2099.
if depositoyear <= currentyear - 2000:
depositoyear += 2000
else:
depositoyear += 1900
break
except:
pass
# TODO, also allow (year), example: https://www.discogs.com/release/265497
if depositoyear != None:
if depositoyear < 1900:
count += 1
errormsgs.append("%8d -- Depósito Legal (impossible year): https://www.discogs.com/release/%s" % (count, str(release_id)))
elif depositoyear > currentyear:
count += 1
errormsgs.append("%8d -- Depósito Legal (impossible year): https://www.discogs.com/release/%s" % (count, str(release_id)))
elif year < depositoyear:
count += 1
errormsgs.append("%8d -- Depósito Legal (release date earlier): https://www.discogs.com/release/%s" % (count, str(release_id)))
else:
count += 1
errormsgs.append("%8d -- Depósito Legal (year not found): https://www.discogs.com/release/%s" % (count, str(release_id)))
elif identifier['type'] == 'Barcode':
for depositovalre in discogssmells.depositovalres:
if depositovalre.match(v.lower()) != None:
founddeposito = True
count += 1
errormsgs.append('%8d -- Depósito Legal (in Barcode): https://www.discogs.com/release/%s' % (count, str(release_id)))
break
else:
if v.startswith("Depósito"):
founddeposito = True
count += 1
errormsgs.append('%8d -- Depósito Legal (BaOI): https://www.discogs.com/release/%s' % (count, str(release_id)))
elif v.startswith("D.L."):
founddeposito = True
count += 1
errormsgs.append('%8d -- Depósito Legal (BaOI): https://www.discogs.com/release/%s' % (count, str(release_id)))
else:
if 'description' in identifier:
found = False
for d in discogssmells.depositores:
result = d.search(identifier['description'].lower())
if result != None:
found = True
break
# sometimes the depósito value itself can be found in the free text field
if not found:
for depositovalre in discogssmells.depositovalres:
deposres = depositovalre.match(identifier['description'].lower())
if deposres != None:
found = True
break
if found:
founddeposito = True
count += 1
errormsgs.append('%8d -- Depósito Legal (BaOI): https://www.discogs.com/release/%s' % (count, str(release_id)))
# temporary hack, move to own configuration option
mould_sid_strict = False
if config_settings['check_mould_sid']:
if identifier['type'] == 'Mould SID Code':
if v.strip() != 'none':
# cleanup first for not so heavy formatting booboos
mould_tmp = v.strip().lower().replace(' ', '')
mould_tmp = mould_tmp.replace('-', '')
# some people insist on using ƒ instead of f
mould_tmp = mould_tmp.replace('ƒ', 'f')
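                    # A mould SID code is 'IFPI' followed by (typically) four or five
                    # alphanumeric characters, for example 'IFPI 9412'.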
res = discogssmells.mouldsidre.match(mould_tmp)
if res is None:
count += 1
errormsgs.append('%8d -- Mould SID Code (value): https://www.discogs.com/release/%s' % (count, str(release_id)))
else:
if mould_sid_strict:
mould_split = mould_tmp.split('ifpi', 1)[-1]
for ch in ['i', 'o', 's', 'q']:
if ch in mould_split[-2:]:
count += 1
errormsgs.append('%8d -- Mould SID Code (strict value): https://www.discogs.com/release/%s' % (count, str(release_id)))
# rough check to find SID codes for formats other than CD/CD-like
if len(formattexts) == 1:
for fmt in set(['Vinyl', 'Cassette', 'Shellac', 'File', 'VHS', 'DCC', 'Memory Stick', 'Edison Disc']):
if fmt in formattexts:
count += 1
errormsgs.append('%8d -- Mould SID Code (Wrong Format: %s): https://www.discogs.com/release/%s' % (count, fmt, str(release_id)))
break
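                    # SID codes were only introduced in the early 1990s, so a SID
                    # code on a release dated before 1993 is suspicious.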
if year != None:
if year < 1993:
count += 1
errormsgs.append('%8d -- SID Code (wrong year): https://www.discogs.com/release/%s' % (count, str(release_id)))
else:
if 'description' in identifier:
description = identifier['description'].lower()
# squash repeated spaces
                        description = re.sub(r'\s+', ' ', description)
description = description.strip()
if description in ['source identification code', 'sid', 'sid code', 'sid-code']:
count += 1
errormsgs.append('%8d -- Unspecified SID Code: https://www.discogs.com/release/%s' % (count, str(release_id)))
elif description in discogssmells.mouldsids:
count += 1
errormsgs.append('%8d -- Mould SID Code: https://www.discogs.com/release/%s' % (count, str(release_id)))
if config_settings['check_mastering_sid']:
if identifier['type'] == 'Mastering SID Code':
if v.strip() != 'none':
# cleanup first for not so heavy formatting booboos
master_tmp = v.strip().lower().replace(' ', '')
master_tmp = master_tmp.replace('-', '')
# some people insist on using ƒ instead of f
master_tmp = master_tmp.replace('ƒ', 'f')
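                    # A mastering SID code is 'IFPI L' followed by (typically) three
                    # or four alphanumeric characters, for example 'IFPI L123'.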
res = discogssmells.masteringsidre.match(master_tmp)
if res is None:
count += 1
errormsgs.append('%8d -- Mastering SID Code (value): https://www.discogs.com/release/%s' % (count, str(release_id)))
else:
# rough check to find SID codes for formats other than CD/CD-like
if len(formattexts) == 1:
for fmt in set(['Vinyl', 'Cassette', 'Shellac', 'File', 'VHS', 'DCC', 'Memory Stick', 'Edison Disc']):
if fmt in formattexts:
count += 1
errormsgs.append('%8d -- Mastering SID Code (Wrong Format: %s): https://www.discogs.com/release/%s' % (count, fmt, str(release_id)))
if year != None:
if year < 1993:
count += 1
errormsgs.append('%8d -- SID Code (wrong year): https://www.discogs.com/release/%s' % (count, str(release_id)))
else:
if 'description' in identifier:
description = identifier['description'].lower()
# squash repeated spaces
                        description = re.sub(r'\s+', ' ', description)
description = description.strip()
if description in ['source identification code', 'sid', 'sid code', 'sid-code']:
count += 1
errormsgs.append('%8d -- Unspecified SID Code: https://www.discogs.com/release/%s' % (count, str(release_id)))
elif description in discogssmells.masteringsids:
count += 1
errormsgs.append('%8d -- Mastering SID Code: https://www.discogs.com/release/%s' % (count, str(release_id)))
elif description in ['sid code matrix', 'sid code - matrix', 'sid code (matrix)', 'sid-code, matrix', 'sid-code matrix', 'sid code (matrix ring)', 'sid code, matrix ring', 'sid code: matrix ring']:
count += 1
errormsgs.append('%8d -- Possible Mastering SID Code: https://www.discogs.com/release/%s' % (count, str(release_id)))
if config_settings['check_pkd']:
if 'country' in release:
if release['country'] == 'India':
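                    # 'PKD' on Indian releases is followed by a manufacturing date,
                    # usually written as month/year (for example '5/97'), which should
                    # not be later than the release year.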
if 'pkd' in v.lower() or "production date" in v.lower():
if year != None:
# try a few variants
pkdres = re.search("\d{1,2}/((?:19|20)?\d{2})", v)
if pkdres != None:
pkdyear = int(pkdres.groups()[0])
if pkdyear < 100:
# correct the year. This won't work correctly after 2099.
if pkdyear <= currentyear - 2000:
pkdyear += 2000
else:
pkdyear += 1900
if pkdyear < 1900:
count += 1
errormsgs.append("%8d -- Indian PKD (impossible year): https://www.discogs.com/release/%s" % (count, str(release_id)))
elif pkdyear > currentyear:
count += 1
errormsgs.append("%8d -- Indian PKD (impossible year): https://www.discogs.com/release/%s" % (count, str(release_id)))
elif year < pkdyear:
count += 1
errormsgs.append("%8d -- Indian PKD (release date earlier): https://www.discogs.com/release/%s" % (count, str(release_id)))
else:
count += 1
errormsgs.append('%8d -- India PKD code (no year): https://www.discogs.com/release/%s' % (count, str(release_id)))
else:
# now check the description
if 'description' in identifier:
description = identifier['description'].lower()
if 'pkd' in description or "production date" in description:
if year != None:
# try a few variants
                                    pkdres = re.search(r"\d{1,2}/((?:19|20)?\d{2})", v)
if pkdres != None:
pkdyear = int(pkdres.groups()[0])
if pkdyear < 100:
# correct the year. This won't work correctly after 2099.
if pkdyear <= currentyear - 2000:
pkdyear += 2000
else:
pkdyear += 1900
if pkdyear < 1900:
count += 1
errormsgs.append("%8d -- Indian PKD (impossible year): https://www.discogs.com/release/%s" % (count, str(release_id)))
elif pkdyear > currentyear:
count += 1
errormsgs.append("%8d -- Indian PKD (impossible year): https://www.discogs.com/release/%s" % (count, str(release_id)))
elif year < pkdyear:
count += 1
errormsgs.append("%8d -- Indian PKD (release date earlier): https://www.discogs.com/release/%s" % (count, str(release_id)))
else:
count += 1
errormsgs.append('%8d -- India PKD code (no year): https://www.discogs.com/release/%s' % (count, str(release_id)))
# check Czechoslovak manufacturing dates
if config_settings['check_manufacturing_date_cs']:
# config hack, needs to be in its own configuration option
strict_cs = False
strict_cs = True
if 'country' in release:
if release['country'] == 'Czechoslovakia':
if 'description' in identifier:
description = identifier['description'].lower()
if 'date' in description:
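                            # Czechoslovak pressings often carry a date code that ends
                            # in a two digit year followed by a single digit (for
                            # example '86 2'); the two digit year is interpreted as 19xx.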
if year != None:
manufacturing_date_res = re.search("(\d{2})\s+\d$", identifier['value'].rstrip())
if manufacturing_date_res != None:
manufacturing_year = int(manufacturing_date_res.groups()[0])
if manufacturing_year < 100:
manufacturing_year += 1900
if manufacturing_year > year:
count += 1
errormsgs.append("%8d -- Czechoslovak manufacturing date (release year wrong): https://www.discogs.com/release/%s" % (count, str(release_id)))
# possibly this check makes sense, but not always
elif manufacturing_year < year and strict_cs:
count += 1
errormsgs.append("%8d -- Czechoslovak manufacturing date (release year possibly wrong): https://www.discogs.com/release/%s" % (count, str(release_id)))
# finally check the notes for some errors
if 'notes' in release:
if '카지노' in release['notes']:
# Korean casino spam that pops up every once in a while
errormsgs.append('Spam: https://www.discogs.com/release/%s' % str(release_id))
if 'country' in release:
if release['country'] == 'Spain':
if config_settings['check_deposito'] and not founddeposito:
# sometimes "deposito legal" can be found in the "notes" section
content_lower = release['notes'].lower()
for d in discogssmells.depositores:
result = d.search(content_lower)
if result != None:
count += 1
found = True
errormsgs.append('%8d -- Depósito Legal (Notes): https://www.discogs.com/release/%s' % (count, str(release_id)))
break
if config_settings['check_html']:
# see https://support.discogs.com/en/support/solutions/articles/13000014661-how-can-i-format-text-
if '<a href="http://www.discogs.com/release/' in release['notes'].lower():
count += 1
errormsgs.append('%8d -- old link (Notes): https://www.discogs.com/release/%s' % (count, str(release_id)))
if config_settings['check_creative_commons']:
ccfound = False
for cc in discogssmells.creativecommons:
if cc in release['notes']:
count += 1
                    errormsgs.append('%8d -- Creative Commons reference (%s): https://www.discogs.com/release/%s' % (count, cc, str(release_id)))
ccfound = True
break
if not ccfound:
            if 'creative commons' in release['notes'].lower():
                count += 1
                errormsgs.append('%8d -- Creative Commons reference: https://www.discogs.com/release/%s' % (count, str(release_id)))
                ccfound = True
for e in errormsgs:
print(e)
if config_settings['use_notify_send']:
p = subprocess.Popen(['notify-send', "-t", "3000", "Error", e], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stanout, stanerr) = p.communicate()
sys.stdout.flush()
return count
def main(argv):
parser = argparse.ArgumentParser()
# the following options are provided on the commandline
parser.add_argument("-c", "--config", action="store", dest="cfg", help="path to configuration file", metavar="FILE")
parser.add_argument("-s", "--startvalue", action="store", dest="startvalue", help="start value for releases", metavar="STARTVALUE")
parser.add_argument("-l", "--latest", action="store", dest="latest_value", help="value for latest release", metavar="LATEST")
args = parser.parse_args()
# some checks for the configuration file
if args.cfg is None:
parser.error("Configuration file missing")
if not os.path.exists(args.cfg):
parser.error("Configuration file does not exist")
config = configparser.ConfigParser()
configfile = open(args.cfg, 'r')
try:
config.read_file(configfile)
except Exception:
print("Cannot read configuration file", file=sys.stderr)
sys.exit(1)
startvalue = None
# check for a startvalue
if args.startvalue != None:
try:
startvalue = int(args.startvalue)
except:
parser.error("start value is not a valid integer, exciting")
latest_release = None
    # check for a latest release value
if args.latest_value != None:
try:
latest_release = int(args.latest_value)
except:
parser.error("latest value is not a valid integer, exciting")
# process the configuration file and store settings
config_settings = {}
for section in config.sections():
if section == 'cleanup':
# store settings for depósito legal checks
try:
if config.get(section, 'deposito') == 'yes':
config_settings['check_deposito'] = True
else:
config_settings['check_deposito'] = False
except Exception:
config_settings['check_deposito'] = True
# store settings for rights society checks
try:
if config.get(section, 'rights_society') == 'yes':
config_settings['check_rights_society'] = True
else:
config_settings['check_rights_society'] = False
except Exception:
config_settings['check_rights_society'] = True
# store settings for label code checks
try:
if config.get(section, 'label_code') == 'yes':
config_settings['check_label_code'] = True
else:
config_settings['check_label_code'] = False
except Exception:
config_settings['check_label_code'] = True
# store settings for label name checks
try:
if config.get(section, 'label_name') == 'yes':
config_settings['check_label_name'] = True
else:
config_settings['check_label_name'] = False
except Exception:
config_settings['check_label_name'] = True
# store settings for ISRC checks
try:
if config.get(section, 'isrc') == 'yes':
config_settings['check_isrc'] = True
else:
config_settings['check_isrc'] = False
except Exception:
config_settings['check_isrc'] = True
# store settings for ASIN checks
try:
if config.get(section, 'asin') == 'yes':
config_settings['check_asin'] = True
else:
config_settings['check_asin'] = False
except Exception:
config_settings['check_asin'] = True
# store settings for mastering SID checks
try:
if config.get(section, 'mastering_sid') == 'yes':
config_settings['check_mastering_sid'] = True
else:
config_settings['check_mastering_sid'] = False
except Exception:
config_settings['check_mastering_sid'] = True
# store settings for mould SID checks
try:
if config.get(section, 'mould_sid') == 'yes':
config_settings['check_mould_sid'] = True
else:
config_settings['check_mould_sid'] = False
except Exception:
config_settings['check_mould_sid'] = True
# store settings for SPARS Code checks
try:
if config.get(section, 'spars') == 'yes':
config_settings['check_spars_code'] = True
else:
config_settings['check_spars_code'] = False
except Exception:
config_settings['check_spars_code'] = True
# store settings for Indian PKD checks
try:
if config.get(section, 'pkd') == 'yes':
config_settings['check_pkd'] = True
else:
config_settings['check_pkd'] = False
except Exception:
config_settings['check_pkd'] = True
# check for Czechoslovak manufacturing dates
try:
if config.get(section, 'manufacturing_date_cs') == 'yes':
config_settings['check_manufacturing_date_cs'] = True
else:
config_settings['check_manufacturing_date_cs'] = False
except Exception:
config_settings['check_manufacturing_date_cs'] = True
# check for Czechoslovak and Czech spelling (0x115 used instead of 0x11B)
try:
if config.get(section, 'spelling_cs') == 'yes':
config_settings['check_spelling_cs'] = True
else:
config_settings['check_spelling_cs'] = False
except Exception:
config_settings['check_spelling_cs'] = True
# store settings for tracklisting checks, default True
try:
if config.get(section, 'tracklisting') == 'yes':
config_settings['check_tracklisting'] = True
else:
config_settings['check_tracklisting'] = False
except Exception:
config_settings['check_tracklisting'] = True
# store settings for credits list checks
try:
if config.get(section, 'credits') == 'yes':
creditsfile = config.get(section, 'creditsfile')
if os.path.exists(creditsfile):
config_settings['creditsfile'] = creditsfile
config_settings['check_credits'] = True
else:
config_settings['check_credits'] = False
except Exception:
config_settings['check_credits'] = False
# store settings for URLs in Notes checks
try:
if config.get(section, 'html') == 'yes':
config_settings['check_html'] = True
else:
config_settings['check_html'] = False
except Exception:
config_settings['check_html'] = True
# month is 00 check: default is False
try:
if config.get(section, 'month') == 'yes':
config_settings['check_month'] = True
else:
config_settings['check_month'] = False
except Exception:
config_settings['check_month'] = False
# year is wrong check: default is False
try:
if config.get(section, 'year') == 'yes':
config_settings['check_year'] = True
else:
config_settings['check_year'] = False
except Exception:
config_settings['check_year'] = False
# reporting all: default is False
try:
if config.get(section, 'reportall') == 'yes':
config_settings['reportall'] = True
else:
config_settings['reportall'] = False
except Exception:
config_settings['reportall'] = False
# debug: default is False
try:
if config.get(section, 'debug') == 'yes':
config_settings['debug'] = True
else:
config_settings['debug'] = False
except Exception:
config_settings['debug'] = False
# report creative commons references: default is False
try:
if config.get(section, 'creative_commons') == 'yes':
config_settings['check_creative_commons'] = True
else:
config_settings['check_creative_commons'] = False
except Exception:
config_settings['check_creative_commons'] = False
elif section == 'api':
# data directory to store JSON files
try:
storedir = config.get(section, 'storedir')
if not os.path.exists(os.path.normpath(storedir)):
config_settings['storedir'] = None
else:
# test if the directory is writable
testfile = tempfile.mkstemp(dir=storedir)
os.fdopen(testfile[0]).close()
os.unlink(testfile[1])
config_settings['storedir'] = storedir
except Exception:
config_settings['storedir'] = None
break
try:
token = config.get(section, 'token')
config_settings['token'] = token
except Exception:
config_settings['token'] = None
try:
username = config.get(section, 'username')
config_settings['username'] = username
except Exception:
config_settings['username'] = None
# skipdownloaded: default is False
config_settings['skipdownloaded'] = False
try:
if config.get(section, 'skipdownloaded') == 'yes':
config_settings['skipdownloaded'] = True
except Exception:
pass
# skip404: default is True
config_settings['skip404'] = True
try:
if config.get(section, 'skip404') == 'yes':
config_settings['skip404'] = True
else:
config_settings['skip404'] = False
except Exception:
pass
# record404: default is True
config_settings['record404'] = True
try:
if config.get(section, 'record404') == 'yes':
config_settings['record404'] = True
else:
config_settings['record404'] = False
except Exception:
pass
# specify location of 404 file
try:
release404 = os.path.normpath(config.get(section, '404file'))
config_settings['404file'] = release404
except:
pass
            # specify whether or not notify-send (on Linux desktops)
            # should be used. Not recommended.
config_settings['use_notify_send'] = True
try:
if config.get(section, 'notify') == 'yes':
config_settings['use_notify_send'] = True
else:
config_settings['use_notify_send'] = False
except Exception:
pass
if config_settings['use_notify_send']:
try:
p = subprocess.Popen(['notify-send', "-t", "3000", "Test for notify-send"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stanout, stanerr) = p.communicate()
except Exception:
config_settings['use_notify_send'] = False
configfile.close()
if config_settings['storedir'] is None:
print("Data store directory non-existent or not writable, exiting.", file=sys.stderr)
sys.exit(1)
if config_settings['token'] is None:
print("Token not specified, exiting.", file=sys.stderr)
sys.exit(1)
if config_settings['username'] is None:
print("Discogs user name not specified, exiting.", file=sys.stderr)
sys.exit(1)
# a list of accepted roles. This is an external file, generated with extractcredits.py
# from the 'helper-scripts' directory.
credits = set()
if 'check_credits' in config_settings:
if config_settings['check_credits']:
creditsfile = open(config_settings['creditsfile'], 'r')
credits = set(map(lambda x: x.strip(), creditsfile.readlines()))
creditsfile.close()
# a file with release numbers that give a 404 error
# This needs more work
if config_settings['skip404']:
if '404file' in config_settings:
if not os.path.isabs(config_settings['404file']):
release404filename = os.path.join(config_settings['storedir'], config_settings['404file'])
if not os.path.exists(release404filename):
release404file = open(release404filename, 'w')
release404file.close()
else:
release404filename = config_settings['404file']
        else:
            # TODO: no 404 file has been configured; a default file should be
            # created here, otherwise release404filename is undefined later on
            pass
# use a (somewhat) exponential backoff in case too many requests have been made
ratelimitbackoff = 5
# set the User Agent and Authorization header for each user request
useragentstring = "DiscogsCleanupForUser-%s/0.1" % config_settings['username']
headers = {'user-agent': useragentstring,
'Authorization': 'Discogs token=%s' % config_settings['token']
}
if latest_release is None:
latest_release = get_latest_release(headers)
if latest_release is None:
print("Something went wrong, try again later", file=sys.stderr)
sys.exit(1)
# if no start value has been provided start with the latest from the
# Discogs website.
if startvalue is None:
startvalue = latest_release
# populate a set with all the 404s that were found.
skip404s = set()
count = 0
if config_settings['skip404']:
release404file = open(release404filename, 'r')
for l in release404file:
# needs to be made more robust
skip404s.add(int(l.strip()))
release404file.close()
# now open again for writing, so new 404 errors can be
# stored.
release404file = open(release404filename, 'a')
# This is just something very silly: if you have an iBuddy device and
# have the corresponding Python module installed it will respond to
# data it finds (currently only favourite artists).
#
# https://github.com/armijnhemel/py3buddy
#
# Not recommended.
ibuddy_enabled = False
try:
import py3buddy
ibuddy_enabled = True
except:
pass
ibuddy = None
if ibuddy_enabled:
ibuddy_config = {}
ibuddy = py3buddy.iBuddy(ibuddy_config)
if ibuddy.dev is None:
ibuddy = None
ibuddy_enabled = False
# example:
#favourites = set(['Bob Dylan', 'Iron Maiden', 'The Beatles'])
favourites = set()
newsleep = 600
# now start a big loop
# https://www.discogs.com/developers/#page:authentication
while True:
for releasenr in range(startvalue, latest_release+1):
if startvalue == latest_release:
break
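            # releases are stored as individual JSON files, grouped in
            # subdirectories of one million releases each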
targetfilename = os.path.join(storedir, "%d" % (releasenr//1000000), "%d.json" % releasenr)
os.makedirs(os.path.join(storedir, "%d" % (releasenr//1000000)), exist_ok=True)
if config_settings['skip404']:
if releasenr in skip404s:
continue
if config_settings['skipdownloaded']:
if os.path.exists(targetfilename):
if os.stat(targetfilename).st_size != 0:
responsejsonfile = open(targetfilename, 'r')
responsejson = json.loads(responsejsonfile.read())
responsejsonfile.close()
count = processrelease(responsejson, config_settings, count, credits, ibuddy, favourites)
continue
print("downloading: %d" % releasenr, file=sys.stderr)
r = requests.get('https://api.discogs.com/releases/%d' % releasenr, headers=headers)
# now first check the headers to see if it is OK to do more requests
if r.status_code != 200:
if r.status_code == 404:
print("%d" % releasenr, file=release404file)
release404file.flush()
if r.status_code == 429:
if 'Retry-After' in r.headers:
try:
retryafter = int(r.headers['Retry-After'])
print("Rate limiting, sleeping for %d seconds" % retryafter, file=sys.stderr)
time.sleep(retryafter)
sys.stderr.flush()
except:
print("Rate limiting, sleeping for %d seconds" % 60, file=sys.stderr)
time.sleep(60)
sys.stderr.flush()
else:
print("Rate limiting, sleeping for %d seconds" % 60, file=sys.stderr)
time.sleep(60)
sys.stderr.flush()
# TODO: the current release will not have been downloaded and processed
continue
# in case there is no 429 response check the headers
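            # At the time of writing the Discogs API allows 60 authenticated requests
            # per minute (fewer for unauthenticated requests); the header below
            # reports how many requests are left in the current window.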
if 'X-Discogs-Ratelimit-Remaining' in r.headers:
ratelimit = int(r.headers['X-Discogs-Ratelimit-Remaining'])
if ratelimit == 0:
# no more requests are allowed, so sleep for some
# time, max 60 seconds
time.sleep(ratelimitbackoff)
print("Rate limiting, sleeping for %d seconds" % ratelimitbackoff, file=sys.stderr)
sys.stderr.flush()
if ratelimitbackoff < 60:
ratelimitbackoff = min(60, ratelimitbackoff * 2)
else:
ratelimitbackoff = 5
# now process the response. This should be JSON, so decode it,
# and also write the JSON data to a separate file for offline
# processing (if necessary).
try:
responsejson = r.json()
jsonreleasefile = open(targetfilename, 'w')
jsonreleasefile.write(r.text)
jsonreleasefile.close()
except:
# response doesn't contain JSON, so something is wrong.
# sleep a bit then continue
time.sleep(2)
continue
# now process the JSON content
count = processrelease(responsejson, config_settings, count, credits, ibuddy, favourites)
# be gentle for Discogs and sleep
time.sleep(0.2)
sys.stderr.flush()
# now set startvalue to latest_release
startvalue = latest_release
# and find the newest release again
print("Grabbing new data", file=sys.stderr)
latest_release = get_latest_release(headers)
if latest_release is None:
print("Something went wrong, try again later", file=sys.stderr)
break
if latest_release < startvalue:
pass
print("Latest = %d" % latest_release, file=sys.stderr)
print("Sleeping for %d seconds" % newsleep, file=sys.stderr)
sys.stderr.flush()
# sleep for ten minutes to make sure some new things
# have been added to Discogs
time.sleep(newsleep)
release404file.close()
if __name__ == "__main__":
main(sys.argv)
|
armijnhemel/cleanup-for-discogs
|
cleanup-discogs-continuous.py
|
Python
|
gpl-3.0
| 63,272
|
[
"CASINO",
"exciting"
] |
5c55787fb12194df5fa585a39d9b61bf87fa78fe0b48ba4251b762ec99509959
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import mock
import time
import unittest
import logging
import functools
from nose.tools import * # noqa: F403
import pytest
from framework.auth.core import Auth
from website import settings
import website.search.search as search
from website.search import elastic_search
from website.search.util import build_query
from website.search_migration.migrate import migrate
from osf.models import (
Retraction,
NodeLicense,
OSFGroup,
Tag,
Preprint,
QuickFilesNode,
)
from addons.wiki.models import WikiPage
from addons.osfstorage.models import OsfStorageFile
from scripts.populate_institutions import main as populate_institutions
from osf_tests import factories
from tests.base import OsfTestCase
from tests.test_features import requires_search
from tests.utils import run_celery_tasks
TEST_INDEX = 'test'
def query(term, raw=False):
results = search.search(build_query(term), index=elastic_search.INDEX, raw=raw)
return results
def query_collections(name):
term = 'category:collectionSubmission AND "{}"'.format(name)
return query(term, raw=True)
def query_user(name):
term = 'category:user AND "{}"'.format(name)
return query(term)
def query_file(name):
term = 'category:file AND "{}"'.format(name)
return query(term)
def query_tag_file(name):
term = 'category:file AND (tags:u"{}")'.format(name)
return query(term)
def retry_assertion(interval=0.3, retries=3):
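    # Retry an assertion a few times with a short sleep in between, to
    # compensate for Elasticsearch indexing latency in these tests.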
def test_wrapper(func):
t_interval = interval
t_retries = retries
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
func(*args, **kwargs)
except AssertionError as e:
if retries:
time.sleep(t_interval)
retry_assertion(interval=t_interval, retries=t_retries - 1)(func)(*args, **kwargs)
else:
raise e
return wrapped
return test_wrapper
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestCollectionsSearch(OsfTestCase):
def setUp(self):
super(TestCollectionsSearch, self).setUp()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
self.user = factories.UserFactory(fullname='Salif Keita')
self.node_private = factories.NodeFactory(creator=self.user, title='Salif Keita: Madan', is_public=False)
self.node_public = factories.NodeFactory(creator=self.user, title='Salif Keita: Yamore', is_public=True)
self.node_one = factories.NodeFactory(creator=self.user, title='Salif Keita: Mandjou', is_public=True)
self.node_two = factories.NodeFactory(creator=self.user, title='Salif Keita: Tekere', is_public=True)
self.reg_private = factories.RegistrationFactory(title='Salif Keita: Madan', creator=self.user, is_public=False)
self.reg_public = factories.RegistrationFactory(title='Salif Keita: Madan', creator=self.user, is_public=True)
self.reg_one = factories.RegistrationFactory(title='Salif Keita: Madan', creator=self.user, is_public=True)
self.provider = factories.CollectionProviderFactory()
self.reg_provider = factories.RegistrationProviderFactory()
self.collection_one = factories.CollectionFactory(creator=self.user, is_public=True, provider=self.provider)
self.collection_public = factories.CollectionFactory(creator=self.user, is_public=True, provider=self.provider)
self.collection_private = factories.CollectionFactory(creator=self.user, is_public=False, provider=self.provider)
self.reg_collection = factories.CollectionFactory(creator=self.user, provider=self.reg_provider, is_public=True)
self.reg_collection_private = factories.CollectionFactory(creator=self.user, provider=self.reg_provider, is_public=False)
def test_only_public_collections_submissions_are_searchable(self):
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
self.collection_public.collect_object(self.node_private, self.user)
self.reg_collection.collect_object(self.reg_private, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
assert_false(self.node_one.is_collected)
assert_false(self.node_public.is_collected)
self.collection_one.collect_object(self.node_one, self.user)
self.collection_public.collect_object(self.node_public, self.user)
self.reg_collection.collect_object(self.reg_public, self.user)
assert_true(self.node_one.is_collected)
assert_true(self.node_public.is_collected)
assert_true(self.reg_public.is_collected)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 3)
self.collection_private.collect_object(self.node_two, self.user)
self.reg_collection_private.collect_object(self.reg_one, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 3)
def test_index_on_submission_privacy_changes(self):
# test_submissions_turned_private_are_deleted_from_index
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
self.collection_public.collect_object(self.node_one, self.user)
self.collection_one.collect_object(self.node_one, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 2)
with run_celery_tasks():
self.node_one.is_public = False
self.node_one.save()
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
# test_submissions_turned_public_are_added_to_index
self.collection_public.collect_object(self.node_private, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.node_private.is_public = True
self.node_private.save()
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 1)
def test_index_on_collection_privacy_changes(self):
# test_submissions_of_collection_turned_private_are_removed_from_index
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
self.collection_public.collect_object(self.node_one, self.user)
self.collection_public.collect_object(self.node_two, self.user)
self.collection_public.collect_object(self.node_public, self.user)
self.reg_collection.collect_object(self.reg_public, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 4)
with run_celery_tasks():
self.collection_public.is_public = False
self.collection_public.save()
self.reg_collection.is_public = False
self.reg_collection.save()
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
# test_submissions_of_collection_turned_public_are_added_to_index
self.collection_private.collect_object(self.node_one, self.user)
self.collection_private.collect_object(self.node_two, self.user)
self.collection_private.collect_object(self.node_public, self.user)
self.reg_collection_private.collect_object(self.reg_public, self.user)
assert_true(self.node_one.is_collected)
assert_true(self.node_two.is_collected)
assert_true(self.node_public.is_collected)
assert_true(self.reg_public.is_collected)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.collection_private.is_public = True
self.collection_private.save()
self.reg_collection.is_public = True
self.reg_collection.save()
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 4)
def test_collection_submissions_are_removed_from_index_on_delete(self):
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
self.collection_public.collect_object(self.node_one, self.user)
self.collection_public.collect_object(self.node_two, self.user)
self.collection_public.collect_object(self.node_public, self.user)
self.reg_collection.collect_object(self.reg_public, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 4)
self.collection_public.delete()
self.reg_collection.delete()
assert_true(self.collection_public.deleted)
assert_true(self.reg_collection.deleted)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
def test_removed_submission_are_removed_from_index(self):
self.collection_public.collect_object(self.node_one, self.user)
self.reg_collection.collect_object(self.reg_public, self.user)
assert_true(self.node_one.is_collected)
assert_true(self.reg_public.is_collected)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 2)
self.collection_public.remove_object(self.node_one)
self.reg_collection.remove_object(self.reg_public)
assert_false(self.node_one.is_collected)
assert_false(self.reg_public.is_collected)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
def test_collection_submission_doc_structure(self):
self.collection_public.collect_object(self.node_one, self.user)
docs = query_collections('Keita')['results']
assert_equal(docs[0]['_source']['title'], self.node_one.title)
with run_celery_tasks():
self.node_one.title = 'Keita Royal Family of Mali'
self.node_one.save()
docs = query_collections('Keita')['results']
assert_equal(docs[0]['_source']['title'], self.node_one.title)
assert_equal(docs[0]['_source']['abstract'], self.node_one.description)
assert_equal(docs[0]['_source']['contributors'][0]['url'], self.user.url)
assert_equal(docs[0]['_source']['contributors'][0]['fullname'], self.user.fullname)
assert_equal(docs[0]['_source']['url'], self.node_one.url)
assert_equal(docs[0]['_source']['id'], '{}-{}'.format(self.node_one._id,
self.node_one.collecting_metadata_list[0].collection._id))
assert_equal(docs[0]['_source']['category'], 'collectionSubmission')
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestUserUpdate(OsfTestCase):
def setUp(self):
super(TestUserUpdate, self).setUp()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
self.user = factories.UserFactory(fullname='David Bowie')
def test_new_user(self):
# Verify that user has been added to Elastic Search
docs = query_user(self.user.fullname)['results']
assert_equal(len(docs), 1)
def test_new_user_unconfirmed(self):
user = factories.UnconfirmedUserFactory()
docs = query_user(user.fullname)['results']
assert_equal(len(docs), 0)
token = user.get_confirmation_token(user.username)
user.confirm_email(token)
user.save()
docs = query_user(user.fullname)['results']
assert_equal(len(docs), 1)
def test_change_name(self):
# Add a user, change her name, and verify that only the new name is
# found in search.
user = factories.UserFactory(fullname='Barry Mitchell')
fullname_original = user.fullname
user.fullname = user.fullname[::-1]
user.save()
docs_original = query_user(fullname_original)['results']
assert_equal(len(docs_original), 0)
docs_current = query_user(user.fullname)['results']
assert_equal(len(docs_current), 1)
def test_disabled_user(self):
# Test that disabled users are not in search index
user = factories.UserFactory(fullname='Bettie Page')
user.save()
# Ensure user is in search index
assert_equal(len(query_user(user.fullname)['results']), 1)
# Disable the user
user.is_disabled = True
user.save()
# Ensure user is not in search index
assert_equal(len(query_user(user.fullname)['results']), 0)
@pytest.mark.enable_quickfiles_creation
def test_merged_user(self):
user = factories.UserFactory(fullname='Annie Lennox')
merged_user = factories.UserFactory(fullname='Lisa Stansfield')
user.save()
merged_user.save()
assert_equal(len(query_user(user.fullname)['results']), 1)
assert_equal(len(query_user(merged_user.fullname)['results']), 1)
user.merge_user(merged_user)
assert_equal(len(query_user(user.fullname)['results']), 1)
assert_equal(len(query_user(merged_user.fullname)['results']), 0)
def test_employment(self):
user = factories.UserFactory(fullname='Helga Finn')
user.save()
institution = 'Finn\'s Fine Filers'
docs = query_user(institution)['results']
assert_equal(len(docs), 0)
user.jobs.append({
'institution': institution,
'title': 'The Big Finn',
})
user.save()
docs = query_user(institution)['results']
assert_equal(len(docs), 1)
def test_education(self):
user = factories.UserFactory(fullname='Henry Johnson')
user.save()
institution = 'Henry\'s Amazing School!!!'
docs = query_user(institution)['results']
assert_equal(len(docs), 0)
user.schools.append({
'institution': institution,
'degree': 'failed all classes',
})
user.save()
docs = query_user(institution)['results']
assert_equal(len(docs), 1)
def test_name_fields(self):
names = ['Bill Nye', 'William', 'the science guy', 'Sanford', 'the Great']
user = factories.UserFactory(fullname=names[0])
user.given_name = names[1]
user.middle_names = names[2]
user.family_name = names[3]
user.suffix = names[4]
user.save()
docs = [query_user(name)['results'] for name in names]
assert_equal(sum(map(len, docs)), len(docs)) # 1 result each
assert_true(all([user._id == doc[0]['id'] for doc in docs]))
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestProject(OsfTestCase):
def setUp(self):
super(TestProject, self).setUp()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
self.user = factories.UserFactory(fullname='John Deacon')
self.project = factories.ProjectFactory(title='Red Special', creator=self.user)
def test_new_project_private(self):
# Verify that a private project is not present in Elastic Search.
docs = query(self.project.title)['results']
assert_equal(len(docs), 0)
def test_make_public(self):
# Make project public, and verify that it is present in Elastic
# Search.
with run_celery_tasks():
self.project.set_privacy('public')
docs = query(self.project.title)['results']
assert_equal(len(docs), 1)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestOSFGroup(OsfTestCase):
def setUp(self):
with run_celery_tasks():
super(TestOSFGroup, self).setUp()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
self.user = factories.UserFactory(fullname='John Deacon')
self.user_two = factories.UserFactory(fullname='Grapes McGee')
self.group = OSFGroup(
name='Cornbread',
creator=self.user,
)
self.group.save()
self.project = factories.ProjectFactory(is_public=True, creator=self.user, title='Biscuits')
self.project.save()
def test_create_osf_group(self):
title = 'Butter'
group = OSFGroup(name=title, creator=self.user)
group.save()
docs = query(title)['results']
assert_equal(len(docs), 1)
def test_set_group_name(self):
title = 'Eggs'
self.group.set_group_name(title)
self.group.save()
docs = query(title)['results']
assert_equal(len(docs), 1)
docs = query('Cornbread')['results']
assert_equal(len(docs), 0)
def test_add_member(self):
self.group.make_member(self.user_two)
docs = query('category:group AND "{}"'.format(self.user_two.fullname))['results']
assert_equal(len(docs), 1)
self.group.make_manager(self.user_two)
docs = query('category:group AND "{}"'.format(self.user_two.fullname))['results']
assert_equal(len(docs), 1)
self.group.remove_member(self.user_two)
docs = query('category:group AND "{}"'.format(self.user_two.fullname))['results']
assert_equal(len(docs), 0)
def test_connect_to_node(self):
self.project.add_osf_group(self.group)
docs = query('category:project AND "{}"'.format(self.group.name))['results']
assert_equal(len(docs), 1)
self.project.remove_osf_group(self.group)
docs = query('category:project AND "{}"'.format(self.group.name))['results']
assert_equal(len(docs), 0)
def test_remove_group(self):
group_name = self.group.name
self.project.add_osf_group(self.group)
docs = query('category:project AND "{}"'.format(group_name))['results']
assert_equal(len(docs), 1)
self.group.remove_group()
docs = query('category:project AND "{}"'.format(group_name))['results']
assert_equal(len(docs), 0)
docs = query(group_name)['results']
assert_equal(len(docs), 0)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestPreprint(OsfTestCase):
def setUp(self):
with run_celery_tasks():
super(TestPreprint, self).setUp()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
self.user = factories.UserFactory(fullname='John Deacon')
self.preprint = Preprint(
title='Red Special',
description='We are the champions',
creator=self.user,
provider=factories.PreprintProviderFactory()
)
self.preprint.save()
self.file = OsfStorageFile.create(
target=self.preprint,
path='/panda.txt',
name='panda.txt',
materialized_path='/panda.txt')
self.file.save()
self.published_preprint = factories.PreprintFactory(
creator=self.user,
title='My Fairy King',
description='Under pressure',
)
def test_new_preprint_unsubmitted(self):
# Verify that an unsubmitted preprint is not present in Elastic Search.
title = 'Apple'
self.preprint.title = title
self.preprint.save()
docs = query(title)['results']
assert_equal(len(docs), 0)
def test_new_preprint_unpublished(self):
# Verify that an unpublished preprint is not present in Elastic Search.
title = 'Banana'
self.preprint = factories.PreprintFactory(creator=self.user, is_published=False, title=title)
assert self.preprint.title == title
docs = query(title)['results']
assert_equal(len(docs), 0)
def test_unsubmitted_preprint_primary_file(self):
# Unpublished preprint's primary_file not showing up in Elastic Search
title = 'Cantaloupe'
self.preprint.title = title
self.preprint.set_primary_file(self.file, auth=Auth(self.user), save=True)
assert self.preprint.title == title
docs = query(title)['results']
assert_equal(len(docs), 0)
def test_publish_preprint(self):
title = 'Date'
self.preprint = factories.PreprintFactory(creator=self.user, is_published=False, title=title)
self.preprint.set_published(True, auth=Auth(self.preprint.creator), save=True)
assert self.preprint.title == title
docs = query(title)['results']
# Both preprint and primary_file showing up in Elastic
assert_equal(len(docs), 2)
def test_preprint_title_change(self):
title_original = self.published_preprint.title
new_title = 'New preprint title'
self.published_preprint.set_title(new_title, auth=Auth(self.user), save=True)
docs = query('category:preprint AND ' + title_original)['results']
assert_equal(len(docs), 0)
docs = query('category:preprint AND ' + new_title)['results']
assert_equal(len(docs), 1)
def test_preprint_description_change(self):
description_original = self.published_preprint.description
new_abstract = 'My preprint abstract'
self.published_preprint.set_description(new_abstract, auth=Auth(self.user), save=True)
docs = query(self.published_preprint.title)['results']
docs = query('category:preprint AND ' + description_original)['results']
assert_equal(len(docs), 0)
docs = query('category:preprint AND ' + new_abstract)['results']
assert_equal(len(docs), 1)
def test_set_preprint_private(self):
# Not currently an option for users, but can be used for spam
self.published_preprint.set_privacy('private', auth=Auth(self.user), save=True)
docs = query(self.published_preprint.title)['results']
# Both preprint and primary_file showing up in Elastic
assert_equal(len(docs), 0)
def test_set_primary_file(self):
# Only primary_file should be in index, if primary_file is changed, other files are removed from index.
self.file = OsfStorageFile.create(
target=self.published_preprint,
path='/panda.txt',
name='panda.txt',
materialized_path='/panda.txt')
self.file.save()
self.published_preprint.set_primary_file(self.file, auth=Auth(self.user), save=True)
docs = query(self.published_preprint.title)['results']
assert_equal(len(docs), 2)
assert_equal(docs[1]['name'], self.file.name)
def test_set_license(self):
license_details = {
'id': 'NONE',
'year': '2015',
'copyrightHolders': ['Iron Man']
}
title = 'Elderberry'
self.published_preprint.title = title
self.published_preprint.set_preprint_license(license_details, Auth(self.user), save=True)
assert self.published_preprint.title == title
docs = query(title)['results']
assert_equal(len(docs), 2)
assert_equal(docs[0]['license']['copyright_holders'][0], 'Iron Man')
assert_equal(docs[0]['license']['name'], 'No license')
def test_add_tags(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
for tag in tags:
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 0)
self.published_preprint.add_tag(tag, Auth(self.user), save=True)
for tag in tags:
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 1)
def test_remove_tag(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
for tag in tags:
self.published_preprint.add_tag(tag, Auth(self.user), save=True)
self.published_preprint.remove_tag(tag, Auth(self.user), save=True)
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 0)
def test_add_contributor(self):
# Add a contributor, then verify that project is found when searching
# for contributor.
user2 = factories.UserFactory(fullname='Adam Lambert')
docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
# with run_celery_tasks():
self.published_preprint.add_contributor(user2, save=True)
docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 1)
def test_remove_contributor(self):
# Add and remove a contributor, then verify that project is not found
# when searching for contributor.
user2 = factories.UserFactory(fullname='Brian May')
self.published_preprint.add_contributor(user2, save=True)
self.published_preprint.remove_contributor(user2, Auth(self.user))
docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
def test_hide_contributor(self):
user2 = factories.UserFactory(fullname='Brian May')
self.published_preprint.add_contributor(user2)
self.published_preprint.set_visible(user2, False, save=True)
docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
self.published_preprint.set_visible(user2, True, save=True)
docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 1)
def test_move_contributor(self):
user2 = factories.UserFactory(fullname='Brian May')
self.published_preprint.add_contributor(user2, save=True)
docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 1)
        assert_equal(docs[0]['contributors'][0]['fullname'], self.user.fullname)
        assert_equal(docs[0]['contributors'][1]['fullname'], user2.fullname)
self.published_preprint.move_contributor(user2, Auth(self.user), 0)
docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 1)
        assert_equal(docs[0]['contributors'][0]['fullname'], user2.fullname)
        assert_equal(docs[0]['contributors'][1]['fullname'], self.user.fullname)
def test_tag_aggregation(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
for tag in tags:
self.published_preprint.add_tag(tag, Auth(self.user), save=True)
docs = query(self.published_preprint.title)['tags']
assert len(docs) == 3
for doc in docs:
assert doc['key'] in tags
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestNodeSearch(OsfTestCase):
def setUp(self):
super(TestNodeSearch, self).setUp()
with run_celery_tasks():
self.node = factories.ProjectFactory(is_public=True, title='node')
self.public_child = factories.ProjectFactory(parent=self.node, is_public=True, title='public_child')
self.private_child = factories.ProjectFactory(parent=self.node, title='private_child')
self.public_subchild = factories.ProjectFactory(parent=self.private_child, is_public=True)
self.node.node_license = factories.NodeLicenseRecordFactory()
self.node.save()
self.query = 'category:project & category:component'
@retry_assertion()
def test_node_license_added_to_search(self):
docs = query(self.query)['results']
node = [d for d in docs if d['title'] == self.node.title][0]
assert_in('license', node)
assert_equal(node['license']['id'], self.node.node_license.license_id)
    @unittest.skip('Elasticsearch latency seems to be causing these tests to fail randomly.')
@retry_assertion(retries=10)
def test_node_license_propogates_to_children(self):
docs = query(self.query)['results']
child = [d for d in docs if d['title'] == self.public_child.title][0]
assert_in('license', child)
assert_equal(child['license'].get('id'), self.node.node_license.license_id)
child = [d for d in docs if d['title'] == self.public_subchild.title][0]
assert_in('license', child)
assert_equal(child['license'].get('id'), self.node.node_license.license_id)
    @unittest.skip('Elasticsearch latency seems to be causing these tests to fail randomly.')
@retry_assertion(retries=10)
def test_node_license_updates_correctly(self):
other_license = NodeLicense.objects.get(name='MIT License')
new_license = factories.NodeLicenseRecordFactory(node_license=other_license)
self.node.node_license = new_license
self.node.save()
docs = query(self.query)['results']
for doc in docs:
assert_equal(doc['license'].get('id'), new_license.license_id)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestRegistrationRetractions(OsfTestCase):
def setUp(self):
super(TestRegistrationRetractions, self).setUp()
self.user = factories.UserFactory(fullname='Doug Bogie')
self.title = 'Red Special'
self.consolidate_auth = Auth(user=self.user)
self.project = factories.ProjectFactory(
title=self.title,
description='',
creator=self.user,
is_public=True,
)
self.registration = factories.RegistrationFactory(project=self.project, is_public=True)
@mock.patch('website.project.tasks.update_node_share')
@mock.patch('osf.models.registrations.Registration.archiving', mock.PropertyMock(return_value=False))
def test_retraction_is_searchable(self, mock_registration_updated):
self.registration.retract_registration(self.user)
self.registration.retraction.state = Retraction.APPROVED
self.registration.retraction.save()
self.registration.save()
self.registration.retraction._on_complete(self.user)
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 1)
@mock.patch('osf.models.registrations.Registration.archiving', mock.PropertyMock(return_value=False))
def test_pending_retraction_wiki_content_is_searchable(self):
# Add unique string to wiki
wiki_content = {'home': 'public retraction test'}
for key, value in wiki_content.items():
docs = query(value)['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
WikiPage.objects.create_for_node(self.registration, key, value, self.consolidate_auth)
# Query and ensure unique string shows up
docs = query(value)['results']
assert_equal(len(docs), 1)
# Query and ensure registration does show up
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 1)
# Retract registration
self.registration.retract_registration(self.user, '')
with run_celery_tasks():
self.registration.save()
self.registration.reload()
        # Query and ensure unique string in wiki still shows up while the retraction is pending
docs = query('category:registration AND "{}"'.format(wiki_content['home']))['results']
assert_equal(len(docs), 1)
# Query and ensure registration does show up
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 1)
@mock.patch('osf.models.registrations.Registration.archiving', mock.PropertyMock(return_value=False))
def test_retraction_wiki_content_is_not_searchable(self):
# Add unique string to wiki
wiki_content = {'home': 'public retraction test'}
for key, value in wiki_content.items():
docs = query(value)['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
WikiPage.objects.create_for_node(self.registration, key, value, self.consolidate_auth)
# Query and ensure unique string shows up
docs = query(value)['results']
assert_equal(len(docs), 1)
# Query and ensure registration does show up
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 1)
# Retract registration
self.registration.retract_registration(self.user, '')
self.registration.retraction.state = Retraction.APPROVED
with run_celery_tasks():
self.registration.retraction.save()
self.registration.save()
self.registration.update_search()
# Query and ensure unique string in wiki doesn't show up
docs = query('category:registration AND "{}"'.format(wiki_content['home']))['results']
assert_equal(len(docs), 0)
# Query and ensure registration does show up
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 1)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestPublicNodes(OsfTestCase):
def setUp(self):
with run_celery_tasks():
super(TestPublicNodes, self).setUp()
self.user = factories.UserFactory(fullname='Doug Bogie')
self.title = 'Red Special'
self.consolidate_auth = Auth(user=self.user)
self.project = factories.ProjectFactory(
title=self.title,
description='',
creator=self.user,
is_public=True,
)
self.component = factories.NodeFactory(
parent=self.project,
description='',
title=self.title,
creator=self.user,
is_public=True
)
self.registration = factories.RegistrationFactory(
title=self.title,
description='',
creator=self.user,
is_public=True,
)
self.registration.archive_job.target_addons = []
self.registration.archive_job.status = 'SUCCESS'
self.registration.archive_job.save()
def test_make_private(self):
# Make project public, then private, and verify that it is not present
# in search.
with run_celery_tasks():
self.project.set_privacy('private')
docs = query('category:project AND ' + self.title)['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.component.set_privacy('private')
docs = query('category:component AND ' + self.title)['results']
assert_equal(len(docs), 0)
def test_search_node_partial(self):
self.project.set_title('Blue Rider-Express', self.consolidate_auth)
with run_celery_tasks():
self.project.save()
find = query('Blue')['results']
assert_equal(len(find), 1)
def test_search_node_partial_with_sep(self):
self.project.set_title('Blue Rider-Express', self.consolidate_auth)
with run_celery_tasks():
self.project.save()
find = query('Express')['results']
assert_equal(len(find), 1)
def test_search_node_not_name(self):
self.project.set_title('Blue Rider-Express', self.consolidate_auth)
with run_celery_tasks():
self.project.save()
find = query('Green Flyer-Slow')['results']
assert_equal(len(find), 0)
def test_public_parent_title(self):
self.project.set_title('hello & world', self.consolidate_auth)
with run_celery_tasks():
self.project.save()
docs = query('category:component AND ' + self.title)['results']
assert_equal(len(docs), 1)
assert_equal(docs[0]['parent_title'], 'hello & world')
assert_true(docs[0]['parent_url'])
def test_make_parent_private(self):
        # Make the parent of the component public, then private, and verify that
        # the component still appears but doesn't link to the parent in search.
with run_celery_tasks():
self.project.set_privacy('private')
docs = query('category:component AND ' + self.title)['results']
assert_equal(len(docs), 1)
assert_false(docs[0]['parent_title'])
assert_false(docs[0]['parent_url'])
def test_delete_project(self):
with run_celery_tasks():
self.component.remove_node(self.consolidate_auth)
docs = query('category:component AND ' + self.title)['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.project.remove_node(self.consolidate_auth)
docs = query('category:project AND ' + self.title)['results']
assert_equal(len(docs), 0)
def test_change_title(self):
title_original = self.project.title
with run_celery_tasks():
self.project.set_title(
'Blue Ordinary', self.consolidate_auth, save=True
)
docs = query('category:project AND ' + title_original)['results']
assert_equal(len(docs), 0)
docs = query('category:project AND ' + self.project.title)['results']
assert_equal(len(docs), 1)
def test_add_tags(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
with run_celery_tasks():
for tag in tags:
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 0)
self.project.add_tag(tag, self.consolidate_auth, save=True)
for tag in tags:
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 1)
def test_remove_tag(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
for tag in tags:
self.project.add_tag(tag, self.consolidate_auth, save=True)
self.project.remove_tag(tag, self.consolidate_auth, save=True)
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 0)
def test_update_wiki(self):
"""Add text to a wiki page, then verify that project is found when
searching for wiki text.
"""
wiki_content = {
'home': 'Hammer to fall',
'swag': '#YOLO'
}
for key, value in wiki_content.items():
docs = query(value)['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
WikiPage.objects.create_for_node(self.project, key, value, self.consolidate_auth)
docs = query(value)['results']
assert_equal(len(docs), 1)
def test_clear_wiki(self):
# Add wiki text to page, then delete, then verify that project is not
# found when searching for wiki text.
wiki_content = 'Hammer to fall'
wp = WikiPage.objects.create_for_node(self.project, 'home', wiki_content, self.consolidate_auth)
with run_celery_tasks():
wp.update(self.user, '')
docs = query(wiki_content)['results']
assert_equal(len(docs), 0)
def test_add_contributor(self):
# Add a contributor, then verify that project is found when searching
# for contributor.
user2 = factories.UserFactory(fullname='Adam Lambert')
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.project.add_contributor(user2, save=True)
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 1)
def test_remove_contributor(self):
# Add and remove a contributor, then verify that project is not found
# when searching for contributor.
user2 = factories.UserFactory(fullname='Brian May')
self.project.add_contributor(user2, save=True)
self.project.remove_contributor(user2, self.consolidate_auth)
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
def test_hide_contributor(self):
user2 = factories.UserFactory(fullname='Brian May')
self.project.add_contributor(user2)
with run_celery_tasks():
self.project.set_visible(user2, False, save=True)
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.project.set_visible(user2, True, save=True)
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 1)
def test_wrong_order_search(self):
title_parts = self.title.split(' ')
title_parts.reverse()
title_search = ' '.join(title_parts)
docs = query(title_search)['results']
assert_equal(len(docs), 3)
def test_tag_aggregation(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
with run_celery_tasks():
for tag in tags:
self.project.add_tag(tag, self.consolidate_auth, save=True)
docs = query(self.title)['tags']
assert len(docs) == 3
for doc in docs:
assert doc['key'] in tags
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestAddContributor(OsfTestCase):
# Tests of the search.search_contributor method
def setUp(self):
self.name1 = 'Roger1 Taylor1'
self.name2 = 'John2 Deacon2'
self.name3 = u'j\xc3\xb3ebert3 Smith3'
self.name4 = u'B\xc3\xb3bbert4 Jones4'
with run_celery_tasks():
super(TestAddContributor, self).setUp()
self.user = factories.UserFactory(fullname=self.name1)
self.user3 = factories.UserFactory(fullname=self.name3)
def test_unreg_users_dont_show_in_search(self):
unreg = factories.UnregUserFactory()
contribs = search.search_contributor(unreg.fullname)
assert_equal(len(contribs['users']), 0)
def test_unreg_users_do_show_on_projects(self):
with run_celery_tasks():
unreg = factories.UnregUserFactory(fullname='Robert Paulson')
self.project = factories.ProjectFactory(
title='Glamour Rock',
creator=unreg,
is_public=True,
)
results = query(unreg.fullname)['results']
assert_equal(len(results), 1)
def test_search_fullname(self):
# Searching for full name yields exactly one result.
contribs = search.search_contributor(self.name1)
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name2)
assert_equal(len(contribs['users']), 0)
def test_search_firstname(self):
# Searching for first name yields exactly one result.
contribs = search.search_contributor(self.name1.split(' ')[0])
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name2.split(' ')[0])
assert_equal(len(contribs['users']), 0)
def test_search_partial(self):
# Searching for part of first name yields exactly one
# result.
contribs = search.search_contributor(self.name1.split(' ')[0][:-1])
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name2.split(' ')[0][:-1])
assert_equal(len(contribs['users']), 0)
def test_search_fullname_special_character(self):
# Searching for a fullname with a special character yields
# exactly one result.
contribs = search.search_contributor(self.name3)
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name4)
assert_equal(len(contribs['users']), 0)
    def test_search_firstname_special_character(self):
# Searching for a first name with a special character yields
# exactly one result.
contribs = search.search_contributor(self.name3.split(' ')[0])
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name4.split(' ')[0])
assert_equal(len(contribs['users']), 0)
def test_search_partial_special_character(self):
# Searching for a partial name with a special character yields
        # exactly one result.
contribs = search.search_contributor(self.name3.split(' ')[0][:-1])
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name4.split(' ')[0][:-1])
assert_equal(len(contribs['users']), 0)
def test_search_profile(self):
orcid = '123456'
user = factories.UserFactory()
user.social['orcid'] = orcid
user.save()
contribs = search.search_contributor(orcid)
assert_equal(len(contribs['users']), 1)
assert_equal(len(contribs['users'][0]['social']), 1)
assert_equal(contribs['users'][0]['social']['orcid'], user.social_links['orcid'])
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestProjectSearchResults(OsfTestCase):
def setUp(self):
self.singular = 'Spanish Inquisition'
self.plural = 'Spanish Inquisitions'
self.possessive = 'Spanish\'s Inquisition'
with run_celery_tasks():
super(TestProjectSearchResults, self).setUp()
self.user = factories.UserFactory(fullname='Doug Bogie')
self.project_singular = factories.ProjectFactory(
title=self.singular,
creator=self.user,
is_public=True,
)
self.project_plural = factories.ProjectFactory(
title=self.plural,
creator=self.user,
is_public=True,
)
self.project_possessive = factories.ProjectFactory(
title=self.possessive,
creator=self.user,
is_public=True,
)
self.project_unrelated = factories.ProjectFactory(
title='Cardinal Richelieu',
creator=self.user,
is_public=True,
)
def test_singular_query(self):
# Verify searching for singular term includes singular,
# possessive and plural versions in results.
time.sleep(1)
results = query(self.singular)['results']
assert_equal(len(results), 3)
def test_plural_query(self):
        # Verify searching for plural term includes singular,
        # possessive and plural versions in results.
results = query(self.plural)['results']
assert_equal(len(results), 3)
def test_possessive_query(self):
# Verify searching for possessive term includes singular,
# possessive and plural versions in results.
results = query(self.possessive)['results']
assert_equal(len(results), 3)
def job(**kwargs):
keys = [
'title',
'institution',
'department',
'location',
'startMonth',
'startYear',
'endMonth',
'endYear',
'ongoing',
]
job = {}
for key in keys:
if key[-5:] == 'Month':
job[key] = kwargs.get(key, 'December')
elif key[-4:] == 'Year':
job[key] = kwargs.get(key, '2000')
else:
job[key] = kwargs.get(key, 'test_{}'.format(key))
return job
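# Illustrative output of the job() helper above, with a hypothetical kwarg: the
# helper fills month/year keys with fixed defaults and every other key with
# 'test_<key>', so job(institution='Oxford') returns e.g.
#   {'title': 'test_title', 'institution': 'Oxford', 'department': 'test_department',
#    'location': 'test_location', 'startMonth': 'December', 'startYear': '2000',
#    'endMonth': 'December', 'endYear': '2000', 'ongoing': 'test_ongoing'}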
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestUserSearchResults(OsfTestCase):
def setUp(self):
with run_celery_tasks():
super(TestUserSearchResults, self).setUp()
self.user_one = factories.UserFactory(jobs=[job(institution='Oxford'),
job(institution='Star Fleet')],
fullname='Date Soong')
self.user_two = factories.UserFactory(jobs=[job(institution='Grapes la Picard'),
job(institution='Star Fleet')],
fullname='Jean-Luc Picard')
self.user_three = factories.UserFactory(jobs=[job(institution='Star Fleet'),
job(institution='Federation Medical')],
fullname='Beverly Crusher')
self.user_four = factories.UserFactory(jobs=[job(institution='Star Fleet')],
fullname='William Riker')
self.user_five = factories.UserFactory(jobs=[job(institution='Traveler intern'),
job(institution='Star Fleet Academy'),
job(institution='Star Fleet Intern')],
fullname='Wesley Crusher')
for i in range(25):
factories.UserFactory(jobs=[job()])
self.current_starfleet = [
self.user_three,
self.user_four,
]
self.were_starfleet = [
self.user_one,
self.user_two,
self.user_three,
self.user_four,
self.user_five
]
    @unittest.skip('Cannot guarantee this always passes')
def test_current_job_first_in_results(self):
results = query_user('Star Fleet')['results']
result_names = [r['names']['fullname'] for r in results]
current_starfleet_names = [u.fullname for u in self.current_starfleet]
for name in result_names[:2]:
assert_in(name, current_starfleet_names)
def test_had_job_in_results(self):
results = query_user('Star Fleet')['results']
result_names = [r['names']['fullname'] for r in results]
were_starfleet_names = [u.fullname for u in self.were_starfleet]
for name in result_names:
assert_in(name, were_starfleet_names)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestSearchExceptions(OsfTestCase):
# Verify that the correct exception is thrown when the connection is lost
@classmethod
def setUpClass(cls):
logging.getLogger('website.project.model').setLevel(logging.CRITICAL)
super(TestSearchExceptions, cls).setUpClass()
if settings.SEARCH_ENGINE == 'elastic':
cls._client = search.search_engine.CLIENT
search.search_engine.CLIENT = None
@classmethod
def tearDownClass(cls):
super(TestSearchExceptions, cls).tearDownClass()
if settings.SEARCH_ENGINE == 'elastic':
search.search_engine.CLIENT = cls._client
@requires_search
def test_connection_error(self):
# Ensures that saving projects/users doesn't break as a result of connection errors
self.user = factories.UserFactory(fullname='Doug Bogie')
self.project = factories.ProjectFactory(
title='Tom Sawyer',
creator=self.user,
is_public=True,
)
self.user.save()
self.project.save()
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestSearchMigration(OsfTestCase):
# Verify that the correct indices are created/deleted during migration
@classmethod
def tearDownClass(cls):
super(TestSearchMigration, cls).tearDownClass()
search.create_index(settings.ELASTIC_INDEX)
def setUp(self):
super(TestSearchMigration, self).setUp()
populate_institutions(default_args=True)
self.es = search.search_engine.CLIENT
search.delete_index(settings.ELASTIC_INDEX)
search.create_index(settings.ELASTIC_INDEX)
self.user = factories.UserFactory(fullname='David Bowie')
self.project = factories.ProjectFactory(
title=settings.ELASTIC_INDEX,
creator=self.user,
is_public=True
)
self.preprint = factories.PreprintFactory(
creator=self.user
)
def test_first_migration_no_remove(self):
migrate(delete=False, remove=False, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(list(var[settings.ELASTIC_INDEX + '_v1']['aliases'].keys())[0], settings.ELASTIC_INDEX)
def test_multiple_migrations_no_remove(self):
for n in range(1, 21):
migrate(delete=False, remove=False, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(list(var[settings.ELASTIC_INDEX + '_v{}'.format(n)]['aliases'].keys())[0], settings.ELASTIC_INDEX)
def test_first_migration_with_remove(self):
migrate(delete=False, remove=True, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(list(var[settings.ELASTIC_INDEX + '_v1']['aliases'].keys())[0], settings.ELASTIC_INDEX)
def test_multiple_migrations_with_remove(self):
for n in range(1, 21, 2):
migrate(delete=False, remove=True, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(list(var[settings.ELASTIC_INDEX + '_v{}'.format(n)]['aliases'].keys())[0], settings.ELASTIC_INDEX)
migrate(delete=False, remove=True, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(list(var[settings.ELASTIC_INDEX + '_v{}'.format(n + 1)]['aliases'].keys())[0], settings.ELASTIC_INDEX)
assert not var.get(settings.ELASTIC_INDEX + '_v{}'.format(n))
def test_migration_institutions(self):
migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app)
count_query = {}
count_query['aggregations'] = {
'counts': {
'terms': {
'field': '_type',
}
}
}
institution_bucket_found = False
res = self.es.search(index=settings.ELASTIC_INDEX, doc_type=None, search_type='count', body=count_query)
for bucket in res['aggregations']['counts']['buckets']:
if bucket['key'] == u'institution':
institution_bucket_found = True
assert_equal(institution_bucket_found, True)
def test_migration_collections(self):
provider = factories.CollectionProviderFactory()
collection_one = factories.CollectionFactory(is_public=True, provider=provider)
collection_two = factories.CollectionFactory(is_public=True, provider=provider)
node = factories.NodeFactory(creator=self.user, title='Ali Bomaye', is_public=True)
collection_one.collect_object(node, self.user)
collection_two.collect_object(node, self.user)
assert node.is_collected
docs = query_collections('*')['results']
assert len(docs) == 2
docs = query_collections('Bomaye')['results']
assert len(docs) == 2
count_query = {}
count_query['aggregations'] = {
'counts': {
'terms': {
'field': '_type',
}
}
}
migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app)
docs = query_collections('*')['results']
assert len(docs) == 2
docs = query_collections('Bomaye')['results']
assert len(docs) == 2
res = self.es.search(index=settings.ELASTIC_INDEX, doc_type='collectionSubmission', search_type='count', body=count_query)
assert res['hits']['total'] == 2
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestSearchFiles(OsfTestCase):
def setUp(self):
super(TestSearchFiles, self).setUp()
self.node = factories.ProjectFactory(is_public=True, title='Otis')
self.osf_storage = self.node.get_addon('osfstorage')
self.root = self.osf_storage.get_root()
def test_search_file(self):
self.root.append_file('Shake.wav')
find = query_file('Shake.wav')['results']
assert_equal(len(find), 1)
def test_search_file_name_without_separator(self):
self.root.append_file('Shake.wav')
find = query_file('Shake')['results']
assert_equal(len(find), 1)
def test_delete_file(self):
file_ = self.root.append_file('I\'ve Got Dreams To Remember.wav')
find = query_file('I\'ve Got Dreams To Remember.wav')['results']
assert_equal(len(find), 1)
file_.delete()
find = query_file('I\'ve Got Dreams To Remember.wav')['results']
assert_equal(len(find), 0)
def test_add_tag(self):
file_ = self.root.append_file('That\'s How Strong My Love Is.mp3')
tag = Tag(name='Redding')
tag.save()
file_.tags.add(tag)
file_.save()
find = query_tag_file('Redding')['results']
assert_equal(len(find), 1)
def test_remove_tag(self):
file_ = self.root.append_file('I\'ve Been Loving You Too Long.mp3')
tag = Tag(name='Blue')
tag.save()
file_.tags.add(tag)
file_.save()
find = query_tag_file('Blue')['results']
assert_equal(len(find), 1)
file_.tags.remove(tag)
file_.save()
find = query_tag_file('Blue')['results']
assert_equal(len(find), 0)
def test_make_node_private(self):
self.root.append_file('Change_Gonna_Come.wav')
find = query_file('Change_Gonna_Come.wav')['results']
assert_equal(len(find), 1)
self.node.is_public = False
with run_celery_tasks():
self.node.save()
find = query_file('Change_Gonna_Come.wav')['results']
assert_equal(len(find), 0)
def test_make_private_node_public(self):
self.node.is_public = False
self.node.save()
self.root.append_file('Try a Little Tenderness.flac')
find = query_file('Try a Little Tenderness.flac')['results']
assert_equal(len(find), 0)
self.node.is_public = True
with run_celery_tasks():
self.node.save()
find = query_file('Try a Little Tenderness.flac')['results']
assert_equal(len(find), 1)
def test_delete_node(self):
node = factories.ProjectFactory(is_public=True, title='The Soul Album')
osf_storage = node.get_addon('osfstorage')
root = osf_storage.get_root()
root.append_file('The Dock of the Bay.mp3')
find = query_file('The Dock of the Bay.mp3')['results']
assert_equal(len(find), 1)
node.is_deleted = True
with run_celery_tasks():
node.save()
find = query_file('The Dock of the Bay.mp3')['results']
assert_equal(len(find), 0)
def test_file_download_url_guid(self):
file_ = self.root.append_file('Timber.mp3')
file_guid = file_.get_guid(create=True)
file_.save()
find = query_file('Timber.mp3')['results']
assert_equal(find[0]['guid_url'], '/' + file_guid._id + '/')
def test_file_download_url_no_guid(self):
file_ = self.root.append_file('Timber.mp3')
path = file_.path
deep_url = '/' + file_.target._id + '/files/osfstorage' + path + '/'
find = query_file('Timber.mp3')['results']
assert_not_equal(file_.path, '')
assert_equal(file_.path, path)
assert_equal(find[0]['guid_url'], None)
assert_equal(find[0]['deep_url'], deep_url)
@pytest.mark.enable_quickfiles_creation
def test_quickfiles_files_appear_in_search(self):
quickfiles = QuickFilesNode.objects.get(creator=self.node.creator)
quickfiles_osf_storage = quickfiles.get_addon('osfstorage')
quickfiles_root = quickfiles_osf_storage.get_root()
quickfiles_root.append_file('GreenLight.mp3')
find = query_file('GreenLight.mp3')['results']
assert_equal(len(find), 1)
assert find[0]['node_url'] == '/{}/quickfiles/'.format(quickfiles.creator._id)
@pytest.mark.enable_quickfiles_creation
def test_qatest_quickfiles_files_not_appear_in_search(self):
quickfiles = QuickFilesNode.objects.get(creator=self.node.creator)
quickfiles_osf_storage = quickfiles.get_addon('osfstorage')
quickfiles_root = quickfiles_osf_storage.get_root()
file = quickfiles_root.append_file('GreenLight.mp3')
tag = Tag(name='qatest')
tag.save()
file.tags.add(tag)
file.save()
find = query_file('GreenLight.mp3')['results']
assert_equal(len(find), 0)
@pytest.mark.enable_quickfiles_creation
def test_quickfiles_spam_user_files_do_not_appear_in_search(self):
quickfiles = QuickFilesNode.objects.get(creator=self.node.creator)
quickfiles_osf_storage = quickfiles.get_addon('osfstorage')
quickfiles_root = quickfiles_osf_storage.get_root()
quickfiles_root.append_file('GreenLight.mp3')
self.node.creator.disable_account()
self.node.creator.confirm_spam()
self.node.creator.save()
find = query_file('GreenLight.mp3')['results']
assert_equal(len(find), 0)
|
saradbowman/osf.io
|
osf_tests/test_elastic_search.py
|
Python
|
apache-2.0
| 61,608
|
[
"Brian"
] |
6176d9588d7c7a3ab7ad45fa83c58fbe179a1aa465689cd8540684ccb0e122bf
|
"""
Lemur
=====
Is a TLS management and orchestration tool.
:copyright: (c) 2015 by Netflix, see AUTHORS for more
:license: Apache, see LICENSE for more details.
"""
from __future__ import absolute_import
import json
import os.path
import datetime
from distutils import log
from distutils.core import Command
from setuptools.command.develop import develop
from setuptools.command.install import install
from setuptools.command.sdist import sdist
from setuptools import setup, find_packages
from subprocess import check_output
ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__)))
install_requires = [
'Flask==0.10.1',
'Flask-RESTful==0.3.3',
'Flask-SQLAlchemy==2.0',
'Flask-Script==2.0.5',
'Flask-Migrate==1.4.0',
'Flask-Bcrypt==0.6.2',
'Flask-Principal==0.4.0',
'Flask-Mail==0.9.1',
'SQLAlchemy-Utils==0.30.11',
'BeautifulSoup4',
'requests==2.7.0',
'psycopg2==2.6.1',
'arrow==0.5.4',
'boto==2.38.0', # we might make this optional
'six==1.9.0',
'gunicorn==19.3.0',
'pycrypto==2.6.1',
'cryptography==1.0.1',
'pyopenssl==0.15.1',
'pyjwt==1.0.1',
'xmltodict==0.9.2',
'lockfile==0.10.2',
'future==0.15.0',
]
tests_require = [
'pyflakes',
'moto==0.4.6',
'nose==1.3.7',
'pytest==2.7.2',
'pytest-flask==0.8.1'
]
docs_require = [
'sphinx',
'sphinxcontrib-httpdomain'
]
dev_requires = [
'flake8>=2.0,<2.1',
]
class SmartInstall(install):
"""
Installs Lemur into the Python environment.
If the package indicator is missing, this will also force a run of
`build_static` which is required for JavaScript assets and other things.
"""
def _needs_static(self):
return not os.path.exists(os.path.join(ROOT, 'lemur/static/dist'))
def run(self):
if self._needs_static():
self.run_command('build_static')
install.run(self)
class DevelopWithBuildStatic(develop):
def install_for_development(self):
self.run_command('build_static')
return develop.install_for_development(self)
class SdistWithBuildStatic(sdist):
def make_release_tree(self, *a, **kw):
dist_path = self.distribution.get_fullname()
sdist.make_release_tree(self, *a, **kw)
self.reinitialize_command('build_static', work_path=dist_path)
self.run_command('build_static')
with open(os.path.join(dist_path, 'lemur-package.json'), 'w') as fp:
json.dump({
'createdAt': datetime.datetime.utcnow().isoformat() + 'Z',
}, fp)
class BuildStatic(Command):
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
log.info("running [npm install --quiet] in {0}".format(ROOT))
try:
check_output(['npm', 'install', '--quiet'], cwd=ROOT)
log.info("running [gulp build]")
check_output([os.path.join(ROOT, 'node_modules', '.bin', 'gulp'), 'build'], cwd=ROOT)
log.info("running [gulp package]")
check_output([os.path.join(ROOT, 'node_modules', '.bin', 'gulp'), 'package'], cwd=ROOT)
        except Exception as e:
            log.warn("Unable to build static content: {0}".format(e))
setup(
name='lemur',
version='0.1.3',
author='Kevin Glisson',
author_email='kglisson@netflix.com',
url='https://github.com/netflix/lemur',
download_url='https://github.com/Netflix/lemur/archive/0.1.3.tar.gz',
description='Certificate management and orchestration service',
long_description=open(os.path.join(ROOT, 'README.rst')).read(),
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
extras_require={
'tests': tests_require,
'docs': docs_require,
'dev': dev_requires,
},
cmdclass={
'build_static': BuildStatic,
'sdist': SdistWithBuildStatic,
'install': SmartInstall
},
entry_points={
'console_scripts': [
'lemur = lemur.manage:main',
],
'lemur.plugins': [
'verisign_issuer = lemur.plugins.lemur_verisign.plugin:VerisignIssuerPlugin',
'aws_destination = lemur.plugins.lemur_aws.plugin:AWSDestinationPlugin',
'aws_source = lemur.plugins.lemur_aws.plugin:AWSSourcePlugin',
'email_notification = lemur.plugins.lemur_email.plugin:EmailNotificationPlugin',
],
},
classifiers=[
'Framework :: Flask',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development'
]
)
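# Illustrative sketch (not part of Lemur itself): the 'lemur.plugins' entry
# points registered above are usually discovered at runtime through
# pkg_resources, roughly as follows.
#
#   import pkg_resources
#   for ep in pkg_resources.iter_entry_points('lemur.plugins'):
#       plugin = ep.load()  # imports e.g. lemur.plugins.lemur_aws.plugin:AWSSourcePlugin
#       print(ep.name, plugin)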
|
rhoml/lemur
|
setup.py
|
Python
|
apache-2.0
| 4,716
|
[
"GULP"
] |
19b3305531ed6d1b0dea4e22b7ef243360fea306179291a806ba3f7360a911c5
|
import numpy as np
class ModelParameters(object):
'''
    This class specifies the model parameters. It contains a lot of information, not all of which is necessary to run Chempy.
The individual definitions are given as comments.
'''
# Which zero point of abundances shall be used. Asplund 2005 is corrected to VESTA abundances
solar_abundance_name_list = ['Lodders09','Asplund09','Asplund05_pure_solar','Asplund05_apogee_correction', 'AG89']
solar_abundance_name_index = 1
solar_abundance_name = solar_abundance_name_list[solar_abundance_name_index]
# Observational constraints
#stellar_identifier_list = ['Proto-sun', 'Arcturus', 'B-stars']
#stellar_identifier_list = ['2M01233744+3414451', '2M02484368+3106550', '2M05510326+1129561', '2M09031459+0648573', '2M09422500+4846338', '2M02011031+2426397', '2M09055837+0505324', '2M20092234+5601366']
#indices = [78,130,122,156,113,34, 128,167] # low alpha sequence
#indices = [0, 163, 27, 98, 95, 17, 71, 79] # random
#indices = [158, 24, 152, 56, 100, 21, 17, 126] # This is the list for middle alpha sequence
#indices = [147, 0, 3, 128, 1, 156, 113, 110] # extremes in alpha over iron space
#indices = [85, 94, 15, 110, 30, 11, 7, 3] # high alpha sequence
#indices = [78,130,122,156,113,34, 128,167,85, 94, 15, 110, 30, 11, 7, 3] #low alpha + high alpha
#indices = [0, 163, 27, 98, 95, 17, 71, 79, 78,130,122,156,113,34, 128,167,85, 94, 15, 110, 30, 11, 7, 3] #low alpha + high alpha + random
#stellar_identifier_list = []
#for item in indices:
# stellar_identifier_list.append("Rob_%d" %item)
#stellar_identifier_list = ['Proto-sun', 'Arcturus', 'B-stars']
# 'prior' can be used as stellar_identifier, then the prior will be sampled with Chempy.wrapper.mcmc() routine
stellar_identifier_list = ['Proto-sun']
stellar_identifier = 'Proto-sun'
	# Convergence parameters of the minimization and MCMC
maxiter_minimization = 500
min_mcmc_iterations = 300
mcmc_tolerance = 0.5
gibbs_sampler_tolerance = 1e-1
gibbs_sampler_maxiter = 10
tol_minimization = 1e-1
nwalkers = 64
mburn = 1
save_state_every = 1
	m = 1000 # For 7 free parameters 300 iterations are usually enough; the mcmc routine stops after 300 if the posterior mean has been converged for more than 200 iterations.
error_marginalization = False # Marginalizing over the model error or using the best model error value
	flat_model_error_prior = [0.,1.,51] # Flat prior for the error marginalization [begin, end, number of evaluations in between]
	beta_error_distribution = [True, 1, 10] # Instead of a flat prior for the error marginalization we use a beta distribution (here a = 1 and b = 10; wikipedia and scipy share this parametrization), putting more weight on small model errors
	zero_model_error = True # a boolean that can be used to restore the old Chempy behaviour of zero model error; it only takes effect if error_marginalization is set to False
send_email = False
verbose = 0
# Time discretization, so far only linear time-steps are implemented
start = 0 # birth of disc, always set to 0
end = 13.5
time_steps = 28#541#241#35#1401
total_mass = 1#45.07
stochastic_IMF = False
number_of_models_overplotted = 1 ### with the positions from an mcmc run
testing_output = False
summary_pdf = False
name_string = 'Chempy_default'
parameter_names = [r'$\alpha_\mathrm{IMF}$',r'$\log_{10}\left(\mathrm{N}_\mathrm{Ia}\right)$',r'$\log_{10}\left(\tau_\mathrm{Ia}\right)$',r'$\log_{10}\left(\mathrm{SFE}\right)$',r'$\log_{10}\left(\mathrm{SFR}_\mathrm{peak}\right)$',r'$\mathrm{x}_\mathrm{out}$']
# SFR still model A from Just&Jahreiss 2010 should be changed
# arbitrary function can be implemented here
basic_sfr_name_list = ['model_A', 'gamma_function', 'prescribed', 'doubly_peaked', 'normal']
basic_sfr_index = 1
basic_sfr_name = basic_sfr_name_list[basic_sfr_index]
if basic_sfr_name == 'model_A':
mass_factor = 1.
S_0 = 45.07488
t_0 = 5.6
t_1 = 8.2
elif basic_sfr_name == 'gamma_function':
mass_factor = 1.
S_0 = 1#45.07488
a_parameter = 2
sfr_beginning = 0
sfr_scale = 3.5 # SFR peak in Gyr for a = 2
elif basic_sfr_name == 'prescribed':
mass_factor = 1.
name_of_file = 'input/Daniel_Weisz/ic1613.lcid.final.sfh'
elif basic_sfr_name == 'doubly_peaked':
mass_factor = 1.
S_0 = 45.07488
peak_ratio = 0.8
sfr_decay = 3.5
sfr_t0 = 2.
peak1t0 = 0.8
peak1sigma = 0.8
elif basic_sfr_name == 'normal':
mass_factor = 1.
S_0 = 45.07488
sfr_peak = 2
sfr_scale = 0.5
elif basic_sfr_name == 'step':
mass_factor = 1.
S_0 = 45.07488
sfr_cutoff = 2
elif basic_sfr_name == 'non_parametric':
mass_factor = 1.
S_0 = 45.07488
sfr_breaks = (1, 2, 3)
sfr_weights = (1, 2, 1)
basic_infall_name_list = ["exponential","constant","sfr_related","peaked_sfr","gamma_function"]
basic_infall_index = 2
basic_infall_name = basic_infall_name_list[basic_infall_index]
starformation_efficiency = 0.
gas_power = 0.
if basic_infall_name == 'sfr_related':
starformation_efficiency = np.power(10,-0.3)
gas_power = 1.0
if basic_infall_name == 'exponential':
infall_amplitude = 10 # not needed just a dummy
tau_infall = -0.15
infall_time_offset = 0
c_infall = -1.
norm_infall = 0.9
if basic_infall_name == 'gamma_function':
norm_infall = 1.0 # not needed just a dummy
infall_a_parameter = 2
infall_beginning = 0
infall_scale = 3.3
yield_table_name_sn2_list = ['chieffi04','OldNugrid','Nomoto2013','Portinari_net','francois', 'chieffi04_net', 'Nomoto2013_net','NuGrid_net','West17_net','TNG_net','CL18_net','Frischknecht16_net']
yield_table_name_sn2_index = 2
yield_table_name_sn2 = yield_table_name_sn2_list[yield_table_name_sn2_index]
yield_table_name_hn_list = ['Nomoto2013']
yield_table_name_hn_index = 0
yield_table_name_hn = yield_table_name_hn_list[yield_table_name_hn_index]
	##### Karakas2016 needs much more computational resources (an order of magnitude more); using the 2010 net yields from Karakas is faster and only N is significantly underproduced
yield_table_name_agb_list = ['Karakas','Nugrid','Karakas_net_yield','Ventura_net','Karakas16_net','TNG_net','Nomoto2013']
yield_table_name_agb_index = 2
yield_table_name_agb = yield_table_name_agb_list[yield_table_name_agb_index]
yield_table_name_1a_list = ['Iwamoto','Thielemann','Seitenzahl', 'TNG']
yield_table_name_1a_index = 2
yield_table_name_1a = yield_table_name_1a_list[yield_table_name_1a_index]
mmin = 0.1
mmax = 100
mass_steps = 5000 #2000 # 200000
imf_type_name_list = ['normed_3slope','Chabrier_1','Chabrier_2','salpeter','BrokenPowerLaw']
imf_type_index = 1
imf_type_name = imf_type_name_list[imf_type_index]
if imf_type_name == 'Chabrier_2':
chabrier_para1 = 22.8978
chabrier_para2 = 716.4
chabrier_para3 = 0.25
high_mass_slope = -2.3
imf_parameter = (22.8978, 716.4, 0.25,-2.29)
if imf_type_name == 'Chabrier_1':
chabrier_para1 = 0.69
chabrier_para2 = 0.079
high_mass_slope = -2.29
imf_parameter = (0.69, 0.079, -2.29)
if imf_type_name == 'salpeter':
imf_slope = 2.35
imf_parameter = (2.35)
if imf_type_name == 'BrokenPowerLaw':
imf_break_1 = 0.5
imf_break_2 = 1.39
imf_break_3 = 6
imf_slope_1 = -1.26
imf_slope_2 = -1.49
imf_slope_3 = -3.02
imf_slope_4 = -2.3
imf_parameter = ((0.5,1.39,6),(-1.26,-1.49,-3.02,-2.3))
if imf_type_name == 'normed_3slope':
imf_break_1 = 0.5
imf_break_2 = 1.0
imf_slope_1 = -1.3
imf_slope_2 = -2.3
imf_slope_3 = -2.29
imf_parameter = (imf_slope_1,imf_slope_2,imf_slope_3,imf_break_1,imf_break_2)
name_infall_list = ['primordial','solar','simple','alpha']
name_infall_index = 1
name_infall = name_infall_list[name_infall_index]
interpolation_list = ['linear','logarithmic']
interpolation_index = 1
interpolation_scheme = interpolation_list[interpolation_index] ## could be a variant to change the interpolation scheme
stellar_lifetimes_list = ['Argast_2000','Raiteri_1996']
stellar_lifetimes_index = 0
stellar_lifetimes = stellar_lifetimes_list[stellar_lifetimes_index] ## which stellar lifetime approximation to use
sn2_to_hn = 1.
sn2mmin = 8.
sn2mmax = 100.
bhmmin = float(sn2mmax) ## maximum of hypernova
bhmmax = float(mmax) ## maximum of the IMF
percentage_of_bh_mass = 0.25 # the rest 75% will be given back to the ISM with the abundances from the step before
agbmmin = 0.5
agbmmax = 8
sagbmmin = float(agbmmax)
sagbmmax = float(sn2mmin)
percentage_to_remnant = 0.13 # see Kobayashi 2011 the remnant mass is about 13%
time_delay_functional_form_list = ['normal','maoz','gamma_function']
time_delay_index = 1
time_delay_functional_form = time_delay_functional_form_list[time_delay_index]
if time_delay_functional_form == 'maoz':
N_0 = np.power(10,-2.75)
sn1a_time_delay = np.power(10,-0.8)
sn1a_exponent = 1.12
dummy = 0.0
sn1a_parameter = [N_0,sn1a_time_delay,sn1a_exponent,dummy]
if time_delay_functional_form == 'normal':
		number_of_pn_exploding = 0.003
sn1a_time_delay = 1.
sn1a_timescale = 3.2
sn1a_gauss_beginning = 0.25
		sn1a_parameter = [number_of_pn_exploding,sn1a_time_delay,sn1a_timescale,sn1a_gauss_beginning]
if time_delay_functional_form == 'gamma_function':
sn1a_norm = 0.0024 #number of sn1a exploding within end of simulation time per 1Msun
sn1a_a_parameter = 1.3
sn1a_beginning = 0
sn1a_scale = 3
sn1a_parameter = [sn1a_norm,sn1a_a_parameter,sn1a_beginning,sn1a_scale]
sn1ammin = 1#float(agbmmin) #Maoz Timedelay should be independent of sn1a_mmin and sn1a_mmax. N_0 just determines the number of SN1a exploding per 1Msun over the time of 15Gyr
sn1ammax = 8#float(sagbmmax)
gas_at_start = 0. #*dt yields the Msun/pc^2 value
log_time=False
gas_reservoir_mass_factor = np.power(10,0.0)#3.0
sfr_factor_for_cosmic_accretion = 1.
#shortened_sfr = False # is needed in order to renormalise the gas reservoir mass factor and the cosmic accretion so that chempy produces consistent results with full run and shortened run.
shortened_sfr_rescaling = 1.
cosmic_accretion_elements = ['H','He']
cosmic_accretion_element_fractions = [0.76,0.24]
outflow_feedback_fraction = 0.5
## various output modes
check_processes = False
only_net_yields_in_process_tables = True
calculate_model = True #just loading the outcome of the last ssp if False
####### Evaluate model
element_names = ['He','C', 'N', 'O', 'F','Ne','Na', 'Mg', 'Al', 'Si', 'P','S', 'Ar','K', 'Ca','Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni']#, 'Zn','Y', 'Ba']# Runs with sun
elements_to_trace = ['Al', 'Ar', 'B', 'Be', 'C', 'Ca', 'Cl', 'Co', 'Cr', 'Cu', 'F', 'Fe', 'Ga', 'Ge', 'H', 'He', 'K', 'Li', 'Mg', 'Mn', 'N', 'Na', 'Ne', 'Ni', 'O', 'P', 'S', 'Sc', 'Si', 'Ti', 'V', 'Zn']
#observational_constraints_index = ['sol_norm']#['gas_reservoir','sn_ratio','sol_norm']#,'wildcard ','cas','arcturus','stars_at_end', 'plot_processes', 'save_abundances', 'elements']
arcturus_age = 7.1# 7.1 +1.5 -1.2
produce_mock_data = False
use_mock_data = False
error_inflation = 1.
# If some parameter is in to optimise there needs to be a prior and constraints defined
if True:
#prior
SSP_parameters = [-2.29 ,-2.75 ,-0.8]#, -0.8 ]#,0.2]#, 0.7, 0.3, 0.0]
SSP_parameters_to_optimize = ['high_mass_slope', 'log10_N_0','log10_sn1a_time_delay'] #,'log10_beta_parameter' ]#,'log10_sfr_factor_for_cosmic_accretion']#,'log10_gas_reservoir_mass_factor','log10_a_parameter','log10_gas_power']
else:
SSP_parameters = []
SSP_parameters_to_optimize = []
assert len(SSP_parameters) == len(SSP_parameters_to_optimize)
if True:
#prior
ISM_parameters = [-0.3, 0.55, 0.5]#, 0.3]#,0.2]#, 0.7, 0.3, 0.0]
ISM_parameters_to_optimize = ['log10_starformation_efficiency', 'log10_sfr_scale', 'outflow_feedback_fraction']#,'log10_gas_reservoir_mass_factor']#,'log10_sfr_factor_for_cosmic_accretion']#,'log10_gas_reservoir_mass_factor','log10_a_parameter','log10_gas_power']
else:
ISM_parameters = []
ISM_parameters_to_optimize = []
assert len(ISM_parameters) == len(ISM_parameters_to_optimize)
p0 = np.hstack((SSP_parameters,ISM_parameters))
to_optimize = np.array(SSP_parameters_to_optimize + ISM_parameters_to_optimize)
ndim = len(to_optimize)
constraints = {
'log10_beta_parameter' : (0,None),
'high_mass_slope' : (-4.,-1.),
'log10_N_0' : (-5,-1),
'log10_sn1a_time_delay' : (-3,1.),
'log10_starformation_efficiency' : (-3,2),
'log10_sfr_scale' : (-1,1),
'sfr_scale' : (0.0,None),
'outflow_feedback_fraction' : (0.,1.),
'log10_gas_reservoir_mass_factor': (None,None),
'N_0' : (0.,1.),
'sn1a_time_delay' : (0.,end),
'a_parameter' : (0.,None),
'starformation_efficiency' : (0.,None),
'gas_power': (1.,2.),
'log10_a_parameter' : (None,None),
'log10_gas_power' : (None,None),
'log10_gas_reservoir_mass_factor': (None,None),
'log10_sfr_factor_for_cosmic_accretion': (None,None),
'mass_factor' : (0,None),
'norm_infall' : (0.,2.),
'tau_infall' : (None,None),
'c_infall' : (None,None),
'gas_at_start' : (0.,2.),
'gas_reservoir_mass_factor' : (0.,20.),
'infall_scale' : (0.0,end),
'sn1a_norm' : (0.,None),
'sn1a_scale' : (0.,None),
}
# the prior entry is (mean,std,0)
# functional form 0 is a gaussian with log values and 1 is for fractions where the sigma distances are in factors from the mean (see cem_function.py)
# for functional form 1 read (mean,factor,1)
priors = {
## gaussian priors
'log10_beta_parameter' : (1.0,0.5,0),
'high_mass_slope' : (-2.3,0.3,0),
'log10_N_0' : (-2.75,0.3,0),
'log10_sn1a_time_delay' : (-0.8,0.3,0),
'log10_starformation_efficiency' : (-0.3,0.3,0),
'log10_sfr_scale' : (0.55,0.1,0),
'sfr_scale' : (3.5,1.5,0),
'outflow_feedback_fraction' : (0.5,0.1,0),
'log10_gas_reservoir_mass_factor' : (0.3,0.3,0),
'a_parameter' : (3.,3.,0),
'infall_scale' : (3.3,0.5,0),
'gas_power': (1.5,0.2,0),
'log10_sfr_factor_for_cosmic_accretion': (0.2,0.3,0),
'log10_a_parameter' : (0.3,0.2,0),
'log10_gas_power' : (0,0.15,0),
## Priors on factors
'starformation_efficiency' : (0.5,3.,1),
'mass_factor' : (1.,1.2,1),
'norm_infall' : (1.,1.2,1),
'sn1a_time_delay' : (0.3,3.,1),
'N_0' : (0.001,3.,1),
'gas_at_start' : (0.1,2.,1),
'gas_reservoir_mass_factor' : (3.,2.,1),
}
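
# Illustrative sketch (not part of Chempy itself) of how the prior tuples above
# could be evaluated, following the comments: functional form 0 is a Gaussian in
# the (possibly log10) parameter value with the second entry as sigma, and
# functional form 1 measures sigma distances in multiplicative factors from the
# mean. The real implementation lives in cem_function.py; this is a simplified
# stand-in for orientation only.
def _example_log_prior(value, prior_entry):
    mean, spread, functional_form = prior_entry
    if functional_form == 0:
        mu, sigma, x = mean, spread, value
    else:
        mu, sigma, x = np.log10(mean), np.log10(spread), np.log10(value)
    return -0.5 * ((x - mu) / sigma) ** 2 - np.log(sigma * np.sqrt(2. * np.pi))

# e.g. _example_log_prior(-2.3, ModelParameters.priors['high_mass_slope'])
# evaluates the default high-mass IMF slope against its Gaussian prior.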
|
jan-rybizki/Chempy
|
Chempy/parameter.py
|
Python
|
mit
| 14,117
|
[
"ChemPy",
"Gaussian"
] |
1f419baea665b86e2ba6d7681a542931717601a2e539eea412903735a241a878
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Interface with command line GULP.
http://projects.ivec.org
WARNING: you need to have GULP installed on your system.
"""
__author__ = "Bharat Medasani, Wenhao Sun"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Bharat Medasani"
__email__ = "bkmedasani@lbl.gov,wenhao@mit.edu"
__status__ = "Production"
__date__ = "$Jun 22, 2013M$"
import subprocess
import os
import re
from pymatgen.core.periodic_table import Element
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.bond_valence import BVAnalyzer
from monty.tempfile import ScratchDir
_anions = set(map(Element, ["O", "S", "F", "Cl", "Br", "N", "P"]))
_cations = set(map(Element, [
"Li", "Na", "K", # alkali metals
"Be", "Mg", "Ca", # alkaline metals
"Al", "Sc", "Ti", "V", "Cr", "Mn", "Fe", "Co", "Ni", "Cu", "Zn", "Ge", "As",
"Y", "Zr", "Nb", "Mo", "Tc", "Ru", "Rh", "Pd", "Ag", "Cd", "In", "Sn", "Sb",
"Hf", "Ta", "W", "Re", "Os", "Ir", "Pt", "Au", "Hg", "Tl", "Pb", "Bi",
"La", "Ce", "Pr", "Nd", "Pm", "Sm", "Eu", "Gd", "Tb", "Dy", "Ho", "Er",
"Tm", "Yb", "Lu"
]))
_gulp_kw = {
#Control of calculation type
"angle", "bond", "cosmo", "cosmic", "cost", "defect", "distance",
"eem", "efg", "fit", "free_energy", "gasteiger", "genetic",
"gradients", "md", "montecarlo", "noautobond", "noenergy", "optimise",
"pot", "predict", "preserve_Q", "property", "phonon", "qeq", "qbond",
"single", "sm", "static_first", "torsion", "transition_state",
#Geometric variable specification
"breathe", "bulk_noopt", "cellonly", "conp", "conv", "isotropic",
"orthorhombic", "nobreathe", "noflgs", "shell", "unfix",
#Algorithm
"c6", "dipole", "fbfgs", "fix_molecule", "full", "hill", "kfull",
"marvinSE", "madelung", "minimum_image", "molecule", "molmec", "molq",
"newda", "noanisotropic_2b", "nod2sym", "nodsymmetry",
"noelectrostatics", "noexclude", "nofcentral", "nofirst_point",
"noksymmetry", "nolist_md", "nomcediff", "nonanal", "noquicksearch",
"noreal", "norecip", "norepulsive", "nosasinitevery", "nosderv",
"nozeropt", "numerical", "qiter", "qok", "spatial", "storevectors",
"nomolecularinternalke", "voight", "zsisa",
#Optimisation method
"conjugate", "dfp", "lbfgs", "numdiag", "positive", "rfo", "unit",
#Output control
"average", "broaden_dos", "cartesian", "compare", "conserved",
"dcharge", "dynamical_matrix",
"eigenvectors", "global", "hessian", "hexagonal", "intensity", "linmin",
"meanke", "nodensity_out", "nodpsym", "nofirst_point", "nofrequency",
"nokpoints", "operators", "outcon", "prt_eam", "prt_two",
"prt_regi_before", "qsas", "restore", "save", "terse",
#Structure control
"full", "hexagonal", "lower_symmetry", "nosymmetry",
#PDF control
"PDF", "PDFcut", "PDFbelow", "PDFkeep", "coreinfo", "nowidth", "nopartial",
#Miscellaneous
"nomodcoord", "oldunits", "zero_potential"
}
class GulpIO:
"""
To generate GULP input and process output
"""
def keyword_line(self, *args):
"""
Checks if the input args are proper gulp keywords and
generates the 1st line of gulp input. Full keywords are expected.
Args:
\\*args: 1st line keywords
"""
#if len(list(filter(lambda x: x in _gulp_kw, args))) != len(args):
# raise GulpError("Wrong keywords given")
gin = " ".join(args)
gin += "\n"
return gin
def structure_lines(self, structure, cell_flg=True, frac_flg=True,
anion_shell_flg=True, cation_shell_flg=False,
symm_flg=True):
"""
Generates GULP input string corresponding to pymatgen structure.
Args:
structure: pymatgen Structure object
cell_flg (default = True): Option to use lattice parameters.
fractional_flg (default = True): If True, fractional coordinates
                are used. Else, cartesian coordinates in Angstroms are used.
******
GULP convention is to use fractional coordinates for periodic
structures and cartesian coordinates for non-periodic
structures.
******
anion_shell_flg (default = True): If True, anions are considered
polarizable.
cation_shell_flg (default = False): If True, cations are
considered polarizable.
symm_flg (default = True): If True, symmetry information is also
written.
Returns:
string containing structure for GULP input
"""
gin = ""
if cell_flg:
gin += "cell\n"
l = structure.lattice
lat_str = [str(i) for i in [l.a, l.b, l.c, l.alpha, l.beta,
l.gamma]]
gin += " ".join(lat_str) + "\n"
if frac_flg:
gin += "frac\n"
coord_attr = "frac_coords"
else:
gin += "cart\n"
coord_attr = "coords"
for site in structure.sites:
coord = [str(i) for i in getattr(site, coord_attr)]
specie = site.specie
core_site_desc = specie.symbol + " core " + " ".join(coord) + "\n"
gin += core_site_desc
if ((specie in _anions and anion_shell_flg) or
(specie in _cations and cation_shell_flg)):
shel_site_desc = specie.symbol + " shel " + " ".join(
coord) + "\n"
gin += shel_site_desc
else:
pass
if symm_flg:
gin += "space\n"
gin += str(SpacegroupAnalyzer(structure).get_space_group_number()) + "\n"
return gin
def specie_potential_lines(self, structure, potential, **kwargs):
"""
Generates GULP input specie and potential string for pymatgen
structure.
Args:
structure: pymatgen.core.structure.Structure object
potential: String specifying the type of potential used
\\*\\*kwargs: Additional parameters related to potential. For
potential == "buckingham",
anion_shell_flg (default = False):
If True, anions are considered polarizable.
anion_core_chrg=float
anion_shell_chrg=float
cation_shell_flg (default = False):
If True, cations are considered polarizable.
cation_core_chrg=float
cation_shell_chrg=float
Returns:
string containing specie and potential specification for gulp
input.
"""
raise NotImplementedError("gulp_specie_potential not yet implemented."
"\nUse library_line instead")
def library_line(self, file_name):
"""
Specifies GULP library file to read species and potential parameters.
If using library don't specify species and potential
in the input file and vice versa. Make sure the elements of
structure are in the library file.
Args:
file_name: Name of GULP library file
Returns:
GULP input string specifying library option
"""
gulplib_set = lambda: 'GULP_LIB' in os.environ.keys()
readable = lambda f: os.path.isfile(f) and os.access(f, os.R_OK)
#dirpath, fname = os.path.split(file_name)
#if dirpath: # Full path specified
# if readable(file_name):
# gin = 'library ' + file_name
# else:
# raise GulpError('GULP Library not found')
#else:
# fpath = os.path.join(os.getcwd(), file_name) # Check current dir
# if readable(fpath):
# gin = 'library ' + fpath
# elif gulplib_set():
# fpath = os.path.join(os.environ['GULP_LIB'], file_name)
# if readable(fpath):
# gin = 'library ' + file_name
# else:
# raise GulpError('GULP Library not found')
# else:
# raise GulpError('GULP Library not found')
#gin += "\n"
#return gin
gin = ""
dirpath, fname = os.path.split(file_name)
if dirpath and readable(file_name): # Full path specified
gin = 'library ' + file_name
else:
fpath = os.path.join(os.getcwd(), file_name) # Check current dir
if readable(fpath):
gin = 'library ' + fpath
elif gulplib_set(): # Check the GULP_LIB path
fpath = os.path.join(os.environ['GULP_LIB'], file_name)
if readable(fpath):
gin = 'library ' + file_name
if gin:
return gin + "\n"
else:
raise GulpError('GULP Library not found')
def buckingham_input(self, structure, keywords, library=None,
uc=True, valence_dict=None):
"""
Gets a GULP input for an oxide structure and buckingham potential
from library.
Args:
structure: pymatgen.core.structure.Structure
keywords: GULP first line keywords.
library (Default=None): File containing the species and potential.
uc (Default=True): Unit Cell Flag.
valence_dict: {El: valence}
"""
gin = self.keyword_line(*keywords)
gin += self.structure_lines(structure, symm_flg=not uc)
if not library:
gin += self.buckingham_potential(structure, valence_dict)
else:
gin += self.library_line(library)
return gin
def buckingham_potential(self, structure, val_dict=None):
"""
Generate species, buckingham, and spring options for an oxide structure
using the parameters in default libraries.
Ref:
1. G.V. Lewis and C.R.A. Catlow, J. Phys. C: Solid State Phys.,
18, 1149-1161 (1985)
2. T.S.Bush, J.D.Gale, C.R.A.Catlow and P.D. Battle,
J. Mater Chem., 4, 831-837 (1994)
Args:
structure: pymatgen.core.structure.Structure
val_dict (Needed if structure is not charge neutral): {El:valence}
dict, where El is element.
"""
if not val_dict:
try:
#If structure is oxidation state decorated, use that first.
el = [site.specie.symbol for site in structure]
valences = [site.specie.oxi_state for site in structure]
val_dict = dict(zip(el, valences))
except AttributeError:
bv = BVAnalyzer()
el = [site.specie.symbol for site in structure]
valences = bv.get_valences(structure)
val_dict = dict(zip(el, valences))
#Try bush library first
bpb = BuckinghamPotential('bush')
bpl = BuckinghamPotential('lewis')
gin = ""
for key in val_dict.keys():
use_bush = True
el = re.sub(r'[1-9,+,\-]', '', key)
if el not in bpb.species_dict.keys():
use_bush = False
elif val_dict[key] != bpb.species_dict[el]['oxi']:
use_bush = False
if use_bush:
gin += "species \n"
gin += bpb.species_dict[el]['inp_str']
gin += "buckingham \n"
gin += bpb.pot_dict[el]
gin += "spring \n"
gin += bpb.spring_dict[el]
continue
#Try lewis library next if element is not in bush
#use_lewis = True
if el != "O": # For metals the key is "Metal_OxiState+"
k = el + '_' + str(int(val_dict[key])) + '+'
if k not in bpl.species_dict.keys():
#use_lewis = False
raise GulpError("Element {} not in library".format(k))
gin += "species\n"
gin += bpl.species_dict[k]
gin += "buckingham\n"
gin += bpl.pot_dict[k]
else:
gin += "species\n"
k = "O_core"
gin += bpl.species_dict[k]
k = "O_shel"
gin += bpl.species_dict[k]
gin += "buckingham\n"
gin += bpl.pot_dict[key]
gin += 'spring\n'
gin += bpl.spring_dict[key]
return gin
def tersoff_input(self, structure, periodic=False, uc=True, *keywords):
"""
Gets a GULP input with Tersoff potential for an oxide structure
Args:
structure: pymatgen.core.structure.Structure
periodic (Default=False): Flag denoting whether periodic
boundary conditions are used
uc (Default=True): Unit Cell Flag.
keywords: GULP first line keywords.
"""
#gin="static noelectrostatics \n "
gin = self.keyword_line(*keywords)
gin += self.structure_lines(
structure, cell_flg=periodic, frac_flg=periodic,
anion_shell_flg=False, cation_shell_flg=False, symm_flg=not uc
)
gin += self.tersoff_potential(structure)
return gin
def tersoff_potential(self, structure):
"""
Generate the species, tersoff potential lines for an oxide structure
Args:
structure: pymatgen.core.structure.Structure
"""
bv = BVAnalyzer()
el = [site.specie.symbol for site in structure]
valences = bv.get_valences(structure)
el_val_dict = dict(zip(el, valences))
gin = "species \n"
qerfstring = "qerfc\n"
for key in el_val_dict.keys():
if key != "O" and el_val_dict[key] % 1 != 0:
raise SystemError("Oxide has mixed valence on metal")
specie_string = key + " core " + str(el_val_dict[key]) + "\n"
gin += specie_string
qerfstring += key + " " + key + " 0.6000 10.0000 \n"
gin += "# noelectrostatics \n Morse \n"
met_oxi_ters = TersoffPotential().data
for key in el_val_dict.keys():
if key != "O":
metal = key + "(" + str(int(el_val_dict[key])) + ")"
ters_pot_str = met_oxi_ters[metal]
gin += ters_pot_str
gin += qerfstring
return gin
def get_energy(self, gout):
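        # The relevant GULP output line typically looks like (hypothetical values):
        #   Total lattice energy       =          -41.12345678 eV
        # so line.split()[4] below picks out the numerical value.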
energy = None
for line in gout.split("\n"):
if "Total lattice energy" in line and "eV" in line:
energy = line.split()
elif "Non-primitive unit cell" in line and "eV" in line:
energy = line.split()
if energy:
return float(energy[4])
else:
raise GulpError("Energy not found in Gulp output")
def get_relaxed_structure(self, gout):
#Find the structure lines
structure_lines = []
cell_param_lines = []
output_lines = gout.split("\n")
no_lines = len(output_lines)
i = 0
# Compute the input lattice parameters
while i < no_lines:
line = output_lines[i]
if "Full cell parameters" in line:
i += 2
line = output_lines[i]
a = float(line.split()[8])
alpha = float(line.split()[11])
line = output_lines[i + 1]
b = float(line.split()[8])
beta = float(line.split()[11])
line = output_lines[i + 2]
c = float(line.split()[8])
gamma = float(line.split()[11])
i += 3
break
elif "Cell parameters" in line:
i += 2
line = output_lines[i]
a = float(line.split()[2])
alpha = float(line.split()[5])
line = output_lines[i + 1]
b = float(line.split()[2])
beta = float(line.split()[5])
line = output_lines[i + 2]
c = float(line.split()[2])
gamma = float(line.split()[5])
i += 3
break
else:
i += 1
while i < no_lines:
line = output_lines[i]
if "Final fractional coordinates of atoms" in line:
# read the site coordinates in the following lines
i += 6
line = output_lines[i]
while line[0:2] != '--':
structure_lines.append(line)
i += 1
line = output_lines[i]
# read the cell parameters
i += 9
line = output_lines[i]
if "Final cell parameters" in line:
i += 3
for del_i in range(6):
line = output_lines[i + del_i]
cell_param_lines.append(line)
break
else:
i += 1
#Process the structure lines
if structure_lines:
sp = []
coords = []
for line in structure_lines:
fields = line.split()
if fields[2] == 'c':
sp.append(fields[1])
coords.append(list(float(x) for x in fields[3:6]))
else:
raise IOError("No structure found")
if cell_param_lines:
a = float(cell_param_lines[0].split()[1])
b = float(cell_param_lines[1].split()[1])
c = float(cell_param_lines[2].split()[1])
alpha = float(cell_param_lines[3].split()[1])
beta = float(cell_param_lines[4].split()[1])
gamma = float(cell_param_lines[5].split()[1])
latt = Lattice.from_parameters(a, b, c, alpha, beta, gamma)
return Structure(latt, sp, coords)
class GulpCaller:
"""
Class to run gulp from commandline
"""
def __init__(self, cmd='gulp'):
"""
Initialize with the executable if not in the standard path
Args:
cmd: Command. Defaults to gulp.
"""
def is_exe(f):
return os.path.isfile(f) and os.access(f, os.X_OK)
fpath, fname = os.path.split(cmd)
if fpath:
if is_exe(cmd):
self._gulp_cmd = cmd
return
else:
for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
file = os.path.join(path, cmd)
if is_exe(file):
self._gulp_cmd = file
return
raise GulpError("Executable not found")
def run(self, gin):
"""
Run GULP using the gin as input
Args:
gin: GULP input string
Returns:
gout: GULP output string
"""
with ScratchDir("."):
p = subprocess.Popen(
self._gulp_cmd, stdout=subprocess.PIPE,
stdin=subprocess.PIPE, stderr=subprocess.PIPE
)
out, err = p.communicate(bytearray(gin, "utf-8"))
out = out.decode("utf-8")
err = err.decode("utf-8")
if "Error" in err or "error" in err:
print(gin)
print("----output_0---------")
print(out)
print("----End of output_0------\n\n\n")
print("----output_1--------")
print(out)
print("----End of output_1------")
raise GulpError(err)
# We may not need this
if "ERROR" in out:
raise GulpError(out)
# Sometimes optimisation may fail to reach convergence
conv_err_string = "Conditions for a minimum have not been satisfied"
if conv_err_string in out:
raise GulpConvergenceError()
gout = ""
for line in out.split("\n"):
gout = gout + line + "\n"
return gout
def get_energy_tersoff(structure, gulp_cmd='gulp'):
"""
Compute the energy of a structure using Tersoff potential.
Args:
structure: pymatgen.core.structure.Structure
gulp_cmd: GULP command if not in standard place
"""
gio = GulpIO()
gc = GulpCaller(gulp_cmd)
gin = gio.tersoff_input(structure)
gout = gc.run(gin)
return gio.get_energy(gout)
def get_energy_buckingham(structure, gulp_cmd='gulp',
keywords=('optimise', 'conp', 'qok'),
valence_dict=None):
"""
Compute the energy of a structure using Buckingham potential.
Args:
structure: pymatgen.core.structure.Structure
gulp_cmd: GULP command if not in standard place
keywords: GULP first line keywords
valence_dict: {El: valence}. Needed if the structure is not charge
neutral.
"""
gio = GulpIO()
gc = GulpCaller(gulp_cmd)
gin = gio.buckingham_input(
structure, keywords, valence_dict=valence_dict
)
gout = gc.run(gin)
return gio.get_energy(gout)
def get_energy_relax_structure_buckingham(structure,
gulp_cmd='gulp',
keywords=('optimise', 'conp'),
valence_dict=None):
"""
Relax a structure and compute the energy using Buckingham potential.
Args:
structure: pymatgen.core.structure.Structure
gulp_cmd: GULP command if not in standard place
keywords: GULP first line keywords
valence_dict: {El: valence}. Needed if the structure is not charge
neutral.
"""
gio = GulpIO()
gc = GulpCaller(gulp_cmd)
gin = gio.buckingham_input(
structure, keywords, valence_dict=valence_dict
)
gout = gc.run(gin)
energy = gio.get_energy(gout)
relax_structure = gio.get_relaxed_structure(gout)
return energy, relax_structure
class GulpError(Exception):
"""
Exception class for GULP.
    Raised when GULP gives an error
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "GulpError : " + self.msg
class GulpConvergenceError(Exception):
"""
Exception class for GULP.
    Raised when proper convergence is not reached in the Mott-Littleton
defect energy optimisation procedure in GULP
"""
def __init__(self, msg=""):
self.msg = msg
def __str__(self):
return self.msg
class BuckinghamPotential:
"""
Generate the Buckingham Potential Table from the bush.lib and lewis.lib.
Ref:
T.S.Bush, J.D.Gale, C.R.A.Catlow and P.D. Battle, J. Mater Chem.,
4, 831-837 (1994).
G.V. Lewis and C.R.A. Catlow, J. Phys. C: Solid State Phys., 18,
1149-1161 (1985)
"""
def __init__(self, bush_lewis_flag):
assert bush_lewis_flag in {'bush', 'lewis'}
pot_file = "bush.lib" if bush_lewis_flag == "bush" else "lewis.lib"
with open(os.path.join(os.environ["GULP_LIB"], pot_file), 'rt') as f:
# In lewis.lib there is no shell for cation
species_dict, pot_dict, spring_dict = {}, {}, {}
sp_flg, pot_flg, spring_flg = False, False, False
for row in f:
if row[0] == "#":
continue
if row.split()[0] == "species":
sp_flg, pot_flg, spring_flg = True, False, False
continue
if row.split()[0] == "buckingham":
sp_flg, pot_flg, spring_flg = False, True, False
continue
if row.split()[0] == "spring":
sp_flg, pot_flg, spring_flg = False, False, True
continue
elmnt = row.split()[0]
if sp_flg:
if bush_lewis_flag == "bush":
if elmnt not in species_dict.keys():
species_dict[elmnt] = {'inp_str': '', 'oxi': 0}
species_dict[elmnt]['inp_str'] += row
species_dict[elmnt]['oxi'] += float(row.split()[2])
elif bush_lewis_flag == "lewis":
if elmnt == "O":
if row.split()[1] == "core":
species_dict["O_core"] = row
if row.split()[1] == "shel":
species_dict["O_shel"] = row
else:
metal = elmnt.split('_')[0]
#oxi_state = metaloxi.split('_')[1][0]
species_dict[elmnt] = metal + " core " + \
row.split()[2] + "\n"
continue
if pot_flg:
if bush_lewis_flag == "bush":
pot_dict[elmnt] = row
elif bush_lewis_flag == "lewis":
if elmnt == "O":
pot_dict["O"] = row
else:
metal = elmnt.split('_')[0]
#oxi_state = metaloxi.split('_')[1][0]
pot_dict[elmnt] = metal + " " + " ".join(
row.split()[1:]) + "\n"
continue
if spring_flg:
spring_dict[elmnt] = row
if bush_lewis_flag == "bush":
#Fill the null keys in spring dict with empty strings
for key in pot_dict.keys():
if key not in spring_dict.keys():
spring_dict[key] = ""
self.species_dict = species_dict
self.pot_dict = pot_dict
self.spring_dict = spring_dict
class TersoffPotential:
"""
    Generate Tersoff Potential Table from the "OxideTersoffPotentials" file
"""
def __init__(self):
module_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(module_dir, "OxideTersoffPotentials"), "r") as f:
data = dict()
for row in f:
metaloxi = row.split()[0]
line = row.split(")")
data[metaloxi] = line[1]
self.data = data
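# A minimal illustrative sketch, not part of pymatgen: it shows how
# GulpIO.get_energy pulls the energy out of a GULP output line. The sample
# line below is hypothetical and only mimics the "Total lattice energy ... eV"
# layout that the parser above expects.
def _demo_energy_parsing():
    sample_gout = "  Total lattice energy       =          -90.12345678 eV"
    for line in sample_gout.split("\n"):
        if "Total lattice energy" in line and "eV" in line:
            return float(line.split()[4])
    raise GulpError("Energy not found in Gulp output")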
|
montoyjh/pymatgen
|
pymatgen/command_line/gulp_caller.py
|
Python
|
mit
| 26,921
|
[
"GULP",
"pymatgen"
] |
5d090d36fd263ff3a6667df1938b9c168035800f371572d7ff21222b72e61548
|
#!/usr/bin/env python2.7
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
"""
Display a matrix of capabilities and network devices.
The matrix has a row for each capability discovered on the network.
The matrix has a column for each network device.
Where each row and column intersect, the presence of that capability, on that
device, is indicated by the appearance of the revision (of the capability) or
blank if incapable.
When the network contains identical devices, the columns of the matrix are
identical (and therefore redundant). This is often the case in a demonstration.
However, when different models and products of network devices are combined,
the matrix is a rich source of information. It provides a convenient and concise
summary of a heterogeneous network.
All the information in the matrix is obtained from a single (HTTP) request to
the Controller.
"""
from __future__ import print_function as _print_function
from basics.inventory import capability_discovery
from itertools import chain
from basics.context import sys_exit, EX_OK, EX_TEMPFAIL
from basics.render import print_table
from pydoc import render_doc as doc, plain
from inspect import cleandoc
def demonstrate(discoveries):
# Structure of map is {capability-id : revision-by-device}
# where capability-id is (capability-name, capability-namespace)
# and revision-by-device is {device-name : capability-revision}.
revision_by_capability = {}
device_names = set()
# Visit each discovery once and collect the capability identifier as a key
# and the capability revision as a (nested) value.
# During this single pass of discoveries, collect device names too.
for discovered in discoveries:
capability_id = (discovered.capability.name, discovered.capability.namespace)
revision_by_device = revision_by_capability.get(capability_id, {})
if not revision_by_device:
revision_by_capability[capability_id] = revision_by_device
revision_by_device[discovered.device_name] = discovered.capability.revision
device_names.add(discovered.device_name)
# Order the devices alphabetically by name.
# This makes the column order deterministic.
device_names = sorted(device_names)
# Flatten the dict into a 2D table.
# Structure of table is [row, ...]
# where row is (capability-name, capability-namespace, revision, ...)
# where revision, ... is ordered list of one revision per device.
table = [
tuple(chain(capability_id, [revision_by_device.get(device_name, '') for device_name in device_names]))
for (capability_id, revision_by_device) in revision_by_capability.items()
]
# Order the table by capability name.
# This makes the row order deterministic.
table.sort()
headers = tuple(chain(('capability-name', 'capability-namespace'), device_names))
print_table(table, headers=headers)
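# A minimal illustrative sketch, not part of the lab code: it flattens a
# hypothetical revision_by_capability mapping into the same table shape that
# demonstrate() builds above, without needing a live Controller. The device
# names and capabilities are made up.
def _demo_capability_table():
    sample = {
        ('netconf', 'urn:ietf:params:netconf'): {'R1': '1.0', 'R2': '1.1'},
        ('interfaces', 'urn:ietf:params:xml:ns:yang'): {'R1': '2014-05-08'},
    }
    device_names = sorted({name for revs in sample.values() for name in revs})
    table = sorted(
        tuple(chain(cap_id, [revs.get(name, '') for name in device_names]))
        for cap_id, revs in sample.items()
    )
    headers = tuple(chain(('capability-name', 'capability-namespace'), device_names))
    return headers, table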
def main():
print(cleandoc(__doc__))
print()
print('capability_discovery()')
discoveries = capability_discovery()
if not discoveries:
print("There are no capable network devices. Demonstration cancelled.")
return EX_TEMPFAIL
demonstrate(discoveries)
return EX_OK
if __name__ == "__main__":
try:
sys_exit(main())
finally:
print()
print('Function Reference:')
print(plain(doc(capability_discovery)))
|
tbarrongh/cosc-learning-labs
|
src/learning_lab/02_capability_matrix.py
|
Python
|
apache-2.0
| 4,027
|
[
"VisIt"
] |
a984c4c95ffbb30e43cdc4e4e441106001c0186d3f19c7e8bab209c21338e1bd
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 23 10:17:53 2014
@author: ibackus
"""
# External modules
import numpy as np
import pynbody
SimArray = pynbody.array.SimArray
# diskpy modules
from diskpy.pdmath import smoothstep
from diskpy.utils import match_units
def make_profile(ICobj):
"""
A wrapper for generating surface density profiles according to the IC object.
Settings for the profile are defined in ICobj.settings. Which profile gets
used is defined by ICobj.settings.sigma.kind
Currently available kinds are:
viscous
powerlaw
        MQWS
        gaussring
**RETURNS**
r : SimArray
Radii at which sigma is calculated
sigma : SimArray
Surface density profile as a function of R
"""
kind = ICobj.settings.sigma.kind
if kind == 'powerlaw':
r, sigma = powerlaw(ICobj.settings, ICobj.T)
elif (kind == 'mqws') | (kind == 'MQWS'):
r, sigma = MQWS(ICobj.settings, ICobj.T)
elif (kind == 'viscous'):
r, sigma = viscous(ICobj.settings)
elif (kind == 'gaussring'):
r, sigma = gaussian_ring(ICobj.settings)
else:
        raise TypeError('Could not make profile for kind {0}'.format(kind))
if hasattr(ICobj.settings.sigma, 'innercut'):
sigma = _applycut(r, sigma, ICobj.settings.sigma.innercut, False)
if hasattr(ICobj.settings.sigma, 'outercut'):
sigma = _applycut(r, sigma, ICobj.settings.sigma.outercut, True)
return r, sigma
def _applycut(r, sigma, rcut, outer=True):
"""
Applies a hard cut to a surface density profile (sigma). If outer=True,
sigma = 0 at r > rcut. Otherwise, sigma = 0 at r < rcut. If rcut is
None, inf, or nan no cut is performed.
"""
if rcut is None:
return sigma
elif np.isnan(rcut) or np.isinf(rcut):
return sigma
if outer:
mask = r > rcut
else:
mask = r < rcut
if np.any(mask):
sigma[mask] = 0
return sigma
def gaussian_ring(settings):
"""
Generates a gaussian ring surface density profile according to:
.. math:: \\Sigma = \\Sigma_0 exp(-(R-R_d)^2/2a^2)
.. math:: \\Sigma_0 = M_d/(2\\pi)^{3/2} a R_d
Here we call a the ringwidth.
The max radius is determined automatically
Parameters
----------
settings : IC settings
settings like those contained in an IC object (see ICgen_settings.py)
Returns
-------
R : SimArray
Radii at which sigma is calculated
sigma : SimArray
Surface density profile as a function of R
"""
Rd = settings.sigma.Rd
ringwidth = settings.sigma.ringwidth
n_points = settings.sigma.n_points
m_disk = settings.sigma.m_disk
Rmax = (Rd + 5*ringwidth).in_units(Rd.units)
Rmax = max(Rmax, Rd*2.0)
R = SimArray(np.linspace(0, Rmax, n_points), Rd.units)
sigma0 = m_disk / (ringwidth * Rd)
sigma0 *= (2*np.pi)**-1.5
expArg = -(R-Rd)**2 / (2*ringwidth**2)
expArg.convert_units('1')
sigma = sigma0 * np.exp(expArg)
return R, sigma
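# A minimal illustrative check, not part of diskpy: with hypothetical numbers it
# verifies numerically that the normalisation used in gaussian_ring integrates
# back to m_disk (to good approximation when ringwidth << Rd). Plain numpy only.
def _demo_gaussian_ring_mass(Rd=1.0, ringwidth=0.05, m_disk=1.0):
    R = np.linspace(0.0, Rd + 5 * ringwidth, 4000)
    sigma0 = m_disk / ((2 * np.pi) ** 1.5 * ringwidth * Rd)
    sigma = sigma0 * np.exp(-(R - Rd) ** 2 / (2 * ringwidth ** 2))
    dR = R[1] - R[0]
    return float(np.sum(2 * np.pi * R * sigma) * dR)  # ~= m_disk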
def viscous(settings):
"""
Generates a surface density profile derived from a self-similarity solution
for a viscous disk, according to:
sigma ~ r^-gamma exp(-r^(2-gamma))
Where r is a dimensionless radius and gamma is a constant less than 2.
Rd (disk radius) is defined as the radius containing 95% of the disk mass
**ARGUMENTS**
settings : IC settings
settings like those contained in an IC object (see ICgen_settings.py)
**RETURNS**
R : SimArray
Radii at which sigma is calculated
sigma : SimArray
Surface density profile as a function of R
"""
Rd = settings.sigma.Rd
rin = settings.sigma.rin
rmax = settings.sigma.rmax
n_points = settings.sigma.n_points
gamma = settings.sigma.gamma
m_disk = settings.sigma.m_disk
# Define the fraction of mass contained within Rd
A = 0.95
# Normalization for r
R1 = Rd / (np.log(1/(1-A))**(1/(2-gamma)))
Rmax = rmax * Rd
Rin = rin * Rd
R = np.linspace(0, Rmax, n_points)
r = (R/R1).in_units('1')
sigma = (r**-gamma) * np.exp(-r**(2-gamma)) * (m_disk/(2*np.pi*R1*R1)) * (2-gamma)
# Deal with infinities at the origin with a hard cut off
sigma[0] = sigma[1]
# Apply interior cutoff
cut_mask = R < Rin
if np.any(cut_mask):
sigma[cut_mask] *= smoothstep(r[cut_mask],degree=21,rescale=True)
return R, sigma
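# A minimal illustrative check, not part of diskpy: for the self-similar profile
# above, the mass enclosed inside Rd should be ~95% of the total disk mass by
# construction of R1. Hypothetical parameters, plain numpy only.
def _demo_viscous_enclosed_mass(Rd=1.0, gamma=0.9, m_disk=1.0):
    A = 0.95
    R1 = Rd / (np.log(1.0 / (1.0 - A)) ** (1.0 / (2.0 - gamma)))
    R = np.linspace(1e-6, 10.0 * Rd, 20000)
    r = R / R1
    sigma = (r ** -gamma) * np.exp(-r ** (2.0 - gamma)) * (m_disk / (2 * np.pi * R1 ** 2)) * (2.0 - gamma)
    dR = R[1] - R[0]
    dM = 2 * np.pi * R * sigma * dR
    return float(dM[R <= Rd].sum() / dM.sum())  # ~= 0.95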
def powerlaw(settings, T = None):
"""
Generates a surface density profile according to a powerlaw sigma ~ r^p
with a smooth interior cutoff and smooth exterior exponential cutoff.
**ARGUMENTS**
settings : IC settings
settings like those contained in an IC object (see ICgen_settings.py)
T : callable function
Function that returns temperature of the disk as a function of radius
IF none, a powerlaw temperature is assumed
**RETURNS**
R : SimArray
Radii at which sigma is calculated
sigma : SimArray
Surface density profile as a function of R
"""
# Parse settings
Rd = settings.sigma.Rd
rin = settings.sigma.rin
rmax = settings.sigma.rmax
cutlength = settings.sigma.cutlength
Mstar = settings.physical.M
Qmin = settings.sigma.Qmin
n_points = settings.sigma.n_points
m = settings.physical.m
power = settings.sigma.power
gamma = settings.physical.gamma_cs()
if T is None:
# If no callable object to calculate Temperature(R) is provided,
# default to a powerlaw T ~ R^-q
T0 = SimArray([129.0],'K') # Temperature at 1 AU
R0 = SimArray([1.0],'au')
q = 0.59
def T(x):
return T0 * np.power((x/R0).in_units('1'),-q)
Rd = match_units(pynbody.units.au, Rd)[1]
Mstar = match_units(pynbody.units.Msol, Mstar)[1]
# Molecular weight
m = match_units(m, pynbody.units.m_p)[0]
# Maximum R to calculate sigma at (needed for the exponential cutoff region)
Rmax = rmax*Rd
# Q calculation parameters:
G = SimArray([1.0],'G')
kB = SimArray([1.0],'k')
# Initialize stuff
A = SimArray(1.0,'Msol')/(2*np.pi*np.power(Rd,2))
# dflemin3 Nov. 4, 2015
# Made units more explicit via SimArrays
r_units = Rd.units
R = SimArray(np.linspace(0,Rmax,n_points),r_units)
r = R/Rd
# Calculate sigma
# Powerlaw
#dflemin3 edit 06/10/2015: Try powerlaw of the form sigma ~ r^power
sigma = A*np.power(r,power)
sigma[0] = 0.0
# Exterior cutoff
sigma[r>1] *= np.exp(-(r[r>1] - 1)**2 / (2*cutlength**2))
# Interior cutoff
sigma[r<rin] *= smoothstep(r[r<rin],degree=21,rescale=True)
# Calculate Q
Q = np.sqrt(Mstar*gamma*kB*T(R)/(G*m*R**3))/(np.pi*sigma)
Q.convert_units('1')
# Rescale sigma to meet the minimum Q requirement
sigma *= Q.min()/Qmin
# Calculate Q
Q = np.sqrt(Mstar*gamma*kB*T(R)/(G*m*R**3))/(np.pi*sigma)
Q.convert_units('1')
return R, sigma
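# A minimal illustrative sketch, not part of diskpy: Toomre Q scales as 1/sigma,
# so the rescaling sigma *= Q.min()/Qmin used above sets the new minimum Q to
# exactly Qmin. The arrays below are made-up stand-ins for Q(R) and sigma(R).
def _demo_q_rescaling(Qmin=1.5):
    Q = np.array([3.0, 2.0, 5.0])
    sigma = np.array([1.0, 1.5, 0.4])
    sigma_new = sigma * Q.min() / Qmin
    Q_new = Q * sigma / sigma_new  # Q ~ 1/sigma with everything else held fixed
    return Q_new.min()             # == Qmin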
def MQWS(settings, T):
"""
    Generates a surface density profile as per the method used in Mayer, Quinn,
Wadsley, and Stadel 2004
** ARGUMENTS **
NOTE: if units are not supplied, assumed units are AU, Msol
settings : IC settings
settings like those contained in an IC object (see ICgen_settings.py)
T : callable
A function to calculate temperature as a function of radius
** RETURNS **
r : SimArray
Radii at which sigma is calculated
sigma : SimArray
Surface density profile as a function of R
"""
# Q calculation parameters:
G = SimArray([1.0],'G')
kB = SimArray([1.0],'k')
# Load in settings
n_points = settings.sigma.n_points
rin = settings.sigma.rin
rout = settings.sigma.rout
rmax = settings.sigma.rmax
Qmin = settings.sigma.Qmin
m = settings.physical.m
Mstar = settings.physical.M
#m_disk = settings.sigma.m_disk
rin = match_units(pynbody.units.au, rin)[1]
rout = match_units(pynbody.units.au, rout)[1]
#m_disk = match_units(pynbody.units.Msol, m_disk)[1]
if rmax is None:
rmax = 2.5 * rout
else:
rmax = match_units(pynbody.units.au, rmax)[1]
r = np.linspace(0, rmax, n_points)
a = (rin/r).in_units('1')
b = (r/rout).in_units('1')
sigma = (np.exp(-a**2 - b**2)/r) * Mstar.units/r.units
# Calculate Q
Q = np.sqrt(Mstar*kB*T(r)/(G*m*r**3))/(np.pi*sigma)
Q.convert_units('1')
sigma *= np.nanmin(Q)/Qmin
# Remove all nans
sigma[np.isnan(sigma)] = 0.0
return r, sigma
|
ibackus/diskpy
|
diskpy/ICgen/sigma_profile.py
|
Python
|
mit
| 9,111
|
[
"Gaussian"
] |
6c863b17f9b541ad0f391a0cab45d5c212e4e713cbe3c1ff0192b057f10bb242
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import math
from . import framework
from . import core
from .framework import in_dygraph_mode, default_main_program
import numpy as np
from .core import VarDesc
from . import unique_name
from .data_feeder import check_variable_and_dtype, check_type, check_dtype
from paddle import _C_ops
__all__ = [
'Constant', 'Uniform', 'Normal', 'TruncatedNormal', 'Xavier', 'Bilinear',
'MSRA', 'ConstantInitializer', 'UniformInitializer', 'NormalInitializer',
'TruncatedNormalInitializer', 'XavierInitializer', 'BilinearInitializer',
'MSRAInitializer', 'NumpyArrayInitializer', 'set_global_initializer'
]
_global_weight_initializer_ = None
_global_bias_initializer_ = None
class Initializer(object):
"""Base class for variable initializers
Defines the common interface of variable initializers.
They add operations to the init program that are used
to initialize variables. Users should not use this class
directly, but need to use one of its implementations.
"""
def __init__(self):
pass
def __call__(self, param, block=None):
"""Add corresponding initialization operations to the network
"""
raise NotImplementedError()
def _check_block(self, block):
if block is None:
block = default_main_program().global_block()
return block
def _compute_fans(self, var):
"""Compute the fan_in and the fan_out for layers
This method computes the fan_in and the fan_out
for neural network layers, if not specified. It is
not possible to perfectly estimate fan_in and fan_out.
This method will estimate it correctly for matrix multiply and
convolutions.
Args:
var: variable for which fan_in and fan_out have to be computed
Returns:
tuple of two integers (fan_in, fan_out)
"""
shape = var.shape
if not shape or len(shape) == 0:
fan_in = fan_out = 1
elif len(shape) == 1:
fan_in = fan_out = shape[0]
elif len(shape) == 2:
# This is the case for simple matrix multiply
fan_in = shape[0]
fan_out = shape[1]
else:
# Assume this to be a convolutional kernel
# In PaddlePaddle, the shape of the kernel is like:
# [num_filters, num_filter_channels, ...] where the remaining
# dimensions are the filter_size
receptive_field_size = np.prod(shape[2:])
fan_in = shape[1] * receptive_field_size
fan_out = shape[0] * receptive_field_size
return (fan_in, fan_out)
class ConstantInitializer(Initializer):
"""Implements the constant initializer
Args:
value (float32): constant value to initialize the variable
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
paddle.enable_static()
x = fluid.data(name="data", shape=[8, 32, 32], dtype="float32")
fc = fluid.layers.fc(
input=x,
size=10,
param_attr=fluid.initializer.Constant(value=2.0))
"""
def __init__(self, value=0.0, force_cpu=False):
assert value is not None
super(ConstantInitializer, self).__init__()
self._value = value
self._force_cpu = force_cpu
def __call__(self, var, block=None):
"""Initialize the input tensor with constant.
Args:
var(Tensor): Tensor that needs to be initialized.
block(Block, optional): The block in which initialization ops
should be added. Used in static graph only, default None.
Returns:
The initialization op
"""
block = self._check_block(block)
assert (isinstance(var, framework.Variable) or
isinstance(var, framework.EagerParamBase))
assert isinstance(block, framework.Block)
if framework.in_dygraph_mode():
var = _C_ops.fill_constant(
var, 'value',
float(self._value), 'force_cpu', self._force_cpu, 'dtype',
int(var.dtype), 'str_value',
str(float(self._value)), 'shape', var.shape)
return None
else:
# fill constant should set the "str_value" to preserve precision
op = block.append_op(
type="fill_constant",
outputs={"Out": var},
attrs={
"shape": var.shape,
"dtype": int(var.dtype),
"value": float(self._value),
'str_value': str(float(self._value)),
'force_cpu': self._force_cpu
},
stop_gradient=True)
var.op = op
return op
class UniformInitializer(Initializer):
"""Implements the random uniform distribution initializer
Args:
low (float): lower boundary of the uniform distribution
high (float): upper boundary of the uniform distribution
seed (int): random seed
diag_num (int): the number of diagonal elements to initialize.
If set to 0, diagonal initialization will be not performed.
diag_step (int): Step size between two diagonal elements,
which is generally the width of the square matrix.
diag_val (float): the value of the diagonal element to be initialized,
default 1.0. It takes effect only if the diag_num is greater than 0.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 1], dtype='float32')
fc = fluid.layers.fc(input=x, size=10,
param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5))
"""
def __init__(self,
low=-1.0,
high=1.0,
seed=0,
diag_num=0,
diag_step=0,
diag_val=1.0):
assert low is not None
assert high is not None
assert high >= low
assert seed is not None
assert diag_num is not None
assert diag_step is not None
assert diag_val is not None
if diag_num > 0 or diag_step > 0:
assert (diag_num > 0 and diag_step > 0)
super(UniformInitializer, self).__init__()
self._low = low
self._high = high
self._seed = seed
self._diag_num = diag_num
self._diag_step = diag_step
self._diag_val = diag_val
def __call__(self, var, block=None):
"""Initialize the input tensor with Uniform distribution.
Args:
var(Tensor): Tensor that needs to be initialized.
block(Block, optional): The block in which initialization ops
should be added. Used in static graph only, default None.
Returns:
The initialization op
"""
block = self._check_block(block)
assert isinstance(block, framework.Block)
check_variable_and_dtype(var, "Out",
["uint16", "float16", "float32", "float64"],
"uniform_random")
if self._seed == 0:
self._seed = block.program.random_seed
        # to be compatible with fp16 initializers
if var.dtype == VarDesc.VarType.FP16:
out_dtype = VarDesc.VarType.FP32
out_var = block.create_var(
name=unique_name.generate(".".join(
['uniform_random', var.name, 'tmp'])),
shape=var.shape,
dtype=out_dtype,
type=VarDesc.VarType.LOD_TENSOR,
persistable=False)
else:
out_dtype = var.dtype
out_var = var
if framework.in_dygraph_mode():
out_var = _C_ops.uniform_random(
'shape', var.shape, 'min', self._low, 'max', self._high, 'seed',
self._seed, 'dtype', out_dtype, 'diag_num', self._diag_num,
'diag_step', self._diag_step, 'diag_val', self._diag_val)
if var.dtype == VarDesc.VarType.FP16:
var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
'out_dtype', var.dtype)
var_tmp._share_underline_tensor_to(var)
else:
out_var._share_underline_tensor_to(var)
return None
else:
op = block.append_op(
type="uniform_random",
inputs={},
outputs={"Out": out_var},
attrs={
"shape": var.shape,
"dtype": out_dtype,
"min": self._low,
"max": self._high,
"seed": self._seed,
"diag_num": self._diag_num,
"diag_step": self._diag_step,
"diag_val": self._diag_val
},
stop_gradient=True)
if var.dtype == VarDesc.VarType.FP16:
block.append_op(
type="cast",
inputs={"X": out_var},
outputs={"Out": var},
attrs={"in_dtype": out_var.dtype,
"out_dtype": var.dtype})
var.op = op
return op
class NormalInitializer(Initializer):
"""Implements the Random Normal(Gaussian) distribution initializer
Args:
loc (float): mean of the normal distribution
scale (float): standard deviation of the normal distribution
seed (int): random seed
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name="data", shape=[None, 32, 32], dtype="float32")
fc = fluid.layers.fc(input=x, size=10,
param_attr=fluid.initializer.Normal(loc=0.0, scale=2.0))
"""
def __init__(self, loc=0.0, scale=1.0, seed=0):
assert loc is not None
assert scale is not None
assert seed is not None
super(NormalInitializer, self).__init__()
self._mean = loc
self._std_dev = scale
self._seed = seed
def __call__(self, var, block=None):
"""Initialize the input tensor with Normal distribution.
Args:
var(Tensor): Tensor that needs to be initialized.
block(Block, optional): The block in which initialization ops
should be added. Used in static graph only, default None.
Returns:
The initialization op
"""
block = self._check_block(block)
assert isinstance(block, framework.Block)
check_variable_and_dtype(var, "Out",
["uint16", "float16", "float32", "float64"],
"guassian_random")
if self._seed == 0:
self._seed = block.program.random_seed
if framework.in_dygraph_mode():
out_var = _C_ops.gaussian_random(
'shape', var.shape, 'dtype', var.dtype, 'mean', self._mean,
'std', self._std_dev, 'seed', self._seed, 'use_mkldnn', False)
out_var._share_underline_tensor_to(var)
return None
else:
op = block.append_op(
type="gaussian_random",
outputs={"Out": var},
attrs={
"shape": var.shape,
"dtype": var.dtype,
"mean": self._mean,
"std": self._std_dev,
"seed": self._seed,
"use_mkldnn": False
},
stop_gradient=True)
var.op = op
return op
class TruncatedNormalInitializer(Initializer):
"""Implements the Random TruncatedNormal(Gaussian) distribution initializer
Args:
loc (float): mean of the normal distribution
scale (float): standard deviation of the normal distribution
seed (int): random seed
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 1], dtype='float32')
fc = fluid.layers.fc(input=x, size=10,
param_attr=fluid.initializer.TruncatedNormal(loc=0.0, scale=2.0))
"""
def __init__(self, loc=0.0, scale=1.0, seed=0):
assert loc is not None
assert scale is not None
assert seed is not None
super(TruncatedNormalInitializer, self).__init__()
self._mean = loc
self._std_dev = scale
self._seed = seed
def __call__(self, var, block=None):
"""Initialize the input tensor with TruncatedNormal distribution.
Args:
var(Tensor): Tensor that needs to be initialized.
block(Block, optional): The block in which initialization ops
should be added. Used in static graph only, default None.
Returns:
The initialization op
"""
block = self._check_block(block)
assert isinstance(var, framework.Variable)
assert isinstance(block, framework.Block)
if self._seed == 0:
self._seed = block.program.random_seed
        # to be compatible with fp16 initializers
if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
out_dtype = VarDesc.VarType.FP32
out_var = block.create_var(
name=unique_name.generate(".".join(
['truncated_gaussian_random', var.name, 'tmp'])),
shape=var.shape,
dtype=out_dtype,
type=VarDesc.VarType.LOD_TENSOR,
persistable=False)
else:
out_dtype = var.dtype
out_var = var
if framework.in_dygraph_mode():
out_var = _C_ops.truncated_gaussian_random(
'shape', var.shape, 'dtype', out_dtype, 'mean', self._mean,
'std', self._std_dev, 'seed', self._seed)
if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
'out_dtype', var.dtype)
var_tmp._share_underline_tensor_to(var)
else:
out_var._share_underline_tensor_to(var)
return None
else:
op = block.append_op(
type="truncated_gaussian_random",
outputs={"Out": out_var},
attrs={
"shape": var.shape,
"dtype": out_dtype,
"mean": self._mean,
"std": self._std_dev,
"seed": self._seed
},
stop_gradient=True)
if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
block.append_op(
type="cast",
inputs={"X": out_var},
outputs={"Out": var},
attrs={"in_dtype": out_var.dtype,
"out_dtype": var.dtype})
var.op = op
return op
class XavierInitializer(Initializer):
r"""
This class implements the Xavier weight initializer from the paper
`Understanding the difficulty of training deep feedforward neural
networks <http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>`_
by Xavier Glorot and Yoshua Bengio.
This initializer is designed to keep the scale of the gradients
approximately same in all the layers. In case of Uniform distribution,
the range is [-x, x], where
.. math::
        x = \sqrt{\frac{6.0}{fan\_in + fan\_out}}
In case of Normal distribution, the mean is 0 and the standard deviation
is
.. math::
        \sqrt{\frac{2.0}{fan\_in + fan\_out}}
Args:
uniform (bool,default True): whether to use uniform ,if False use normal distribution
fan_in (float,default None): fan_in for Xavier initialization. If None, it is
inferred from the variable.
fan_out (float,default None): fan_out for Xavier initialization. If None, it is
inferred from the variable.
seed (int): random seed
Note:
It is recommended to set fan_in and fan_out to None for most cases.
Examples:
.. code-block:: python
import paddle.fluid as fluid
queries = fluid.data(name='x', shape=[None,1], dtype='float32')
fc = fluid.layers.fc(
input=queries, size=10,
param_attr=fluid.initializer.Xavier(uniform=False))
"""
def __init__(self, uniform=True, fan_in=None, fan_out=None, seed=0):
assert uniform is not None
assert seed is not None
super(XavierInitializer, self).__init__()
self._uniform = uniform
self._fan_in = fan_in
self._fan_out = fan_out
self._seed = seed
def __call__(self, var, block=None):
"""Initialize the input tensor with Xavier initialization.
Args:
var(Tensor): Tensor that needs to be initialized.
block(Block, optional): The block in which initialization ops
should be added. Used in static graph only, default None.
Returns:
The initialization op
"""
block = self._check_block(block)
assert isinstance(block, framework.Block)
check_variable_and_dtype(var, "Out",
["uint16", "float16", "float32", "float64"],
"xavier_init")
f_in, f_out = self._compute_fans(var)
# If fan_in and fan_out are passed, use them
fan_in = f_in if self._fan_in is None else self._fan_in
fan_out = f_out if self._fan_out is None else self._fan_out
if self._seed == 0:
self._seed = block.program.random_seed
        # to be compatible with fp16 initializers
if var.dtype == VarDesc.VarType.FP16 or (
var.dtype == VarDesc.VarType.BF16 and not self._uniform):
out_dtype = VarDesc.VarType.FP32
out_var = block.create_var(
name=unique_name.generate(".".join(
['xavier_init', var.name, 'tmp'])),
shape=var.shape,
dtype=out_dtype,
type=VarDesc.VarType.LOD_TENSOR,
persistable=False)
else:
out_dtype = var.dtype
out_var = var
if framework.in_dygraph_mode():
if self._uniform:
limit = np.sqrt(6.0 / float(fan_in + fan_out))
out_var = _C_ops.uniform_random('shape', out_var.shape, 'min',
-limit, 'max', limit, 'seed',
self._seed, 'dtype', out_dtype)
else:
std = np.sqrt(2.0 / float(fan_in + fan_out))
out_var = _C_ops.gaussian_random(
'shape', out_var.shape, 'dtype', out_dtype, 'mean', 0.0,
'std', std, 'seed', self._seed)
if var.dtype == VarDesc.VarType.FP16 or (
var.dtype == VarDesc.VarType.BF16 and not self._uniform):
var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
'out_dtype', var.dtype)
var_tmp._share_underline_tensor_to(var)
else:
out_var._share_underline_tensor_to(var)
return None
else:
if self._uniform:
limit = np.sqrt(6.0 / float(fan_in + fan_out))
op = block.append_op(
type="uniform_random",
inputs={},
outputs={"Out": out_var},
attrs={
"shape": out_var.shape,
"dtype": out_dtype,
"min": -limit,
"max": limit,
"seed": self._seed
},
stop_gradient=True)
else:
std = np.sqrt(2.0 / float(fan_in + fan_out))
op = block.append_op(
type="gaussian_random",
outputs={"Out": out_var},
attrs={
"shape": out_var.shape,
"dtype": out_dtype,
"mean": 0.0,
"std": std,
"seed": self._seed
},
stop_gradient=True)
if var.dtype == VarDesc.VarType.FP16 or (
var.dtype == VarDesc.VarType.BF16 and not self._uniform):
block.append_op(
type="cast",
inputs={"X": out_var},
outputs={"Out": var},
attrs={"in_dtype": out_var.dtype,
"out_dtype": var.dtype})
var.op = op
return op
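# A minimal illustrative sketch, not part of Paddle: it reproduces the fan
# computation from Initializer._compute_fans and the Xavier bounds quoted in the
# docstring above for a hypothetical conv kernel shape, using plain numpy.
def _demo_xavier_bounds(shape=(64, 32, 3, 3)):
    receptive_field_size = np.prod(shape[2:])
    fan_in = shape[1] * receptive_field_size
    fan_out = shape[0] * receptive_field_size
    limit = np.sqrt(6.0 / float(fan_in + fan_out))  # uniform case: U(-limit, limit)
    std = np.sqrt(2.0 / float(fan_in + fan_out))    # normal case: N(0, std)
    return fan_in, fan_out, limit, std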
class MSRAInitializer(Initializer):
r"""Implements the MSRA initializer a.k.a. Kaiming Initializer
This class implements the weight initialization from the paper
`Delving Deep into Rectifiers: Surpassing Human-Level Performance on
ImageNet Classification <https://arxiv.org/abs/1502.01852>`_
by Kaiming He, Xiangyu Zhang, Shaoqing Ren and Jian Sun. This is a
robust initialization method that particularly considers the rectifier
nonlinearities. In case of Uniform distribution, the range is [-x, x], where
.. math::
        x = \sqrt{\frac{6.0}{fan\_in}}
In case of Normal distribution, the mean is 0 and the standard deviation
is
.. math::
        \sqrt{\frac{2.0}{fan\_in}}
Args:
uniform (bool): whether to use uniform or normal distribution
fan_in (float32|None): fan_in for MSRAInitializer. If None, it is\
inferred from the variable. default is None.
seed (int32): random seed
Note:
It is recommended to set fan_in to None for most cases.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
paddle.enable_static()
x = fluid.data(name="data", shape=[8, 32, 32], dtype="float32")
fc = fluid.layers.fc(input=x, size=10,
param_attr=fluid.initializer.MSRA(uniform=False))
"""
def __init__(self, uniform=True, fan_in=None, seed=0):
"""Constructor for MSRAInitializer
"""
assert uniform is not None
assert seed is not None
super(MSRAInitializer, self).__init__()
self._uniform = uniform
self._fan_in = fan_in
self._seed = seed
def __call__(self, var, block=None):
"""Initialize the input tensor with MSRA initialization.
Args:
var(Tensor): Tensor that needs to be initialized.
block(Block, optional): The block in which initialization ops
should be added. Used in static graph only, default None.
Returns:
The initialization op
"""
block = self._check_block(block)
assert isinstance(var, framework.Variable)
assert isinstance(block, framework.Block)
f_in, f_out = self._compute_fans(var)
# If fan_in is passed, use it
fan_in = f_in if self._fan_in is None else self._fan_in
if self._seed == 0:
self._seed = block.program.random_seed
        # to be compatible with fp16 initializers
if var.dtype == VarDesc.VarType.FP16 or (
var.dtype == VarDesc.VarType.BF16 and not self._uniform):
out_dtype = VarDesc.VarType.FP32
out_var = block.create_var(
name=unique_name.generate(".".join(
                    ['msra_init', var.name, 'tmp'])),
shape=var.shape,
dtype=out_dtype,
type=VarDesc.VarType.LOD_TENSOR,
persistable=False)
else:
out_dtype = var.dtype
out_var = var
if framework.in_dygraph_mode():
if self._uniform:
limit = np.sqrt(6.0 / float(fan_in))
out_var = _C_ops.uniform_random('shape', out_var.shape, 'min',
-limit, 'max', limit, 'seed',
self._seed, 'dtype',
int(out_dtype))
else:
std = np.sqrt(2.0 / float(fan_in))
out_var = _C_ops.gaussian_random(
'shape', out_var.shape, 'dtype',
int(out_dtype), 'mean', 0.0, 'std', std, 'seed', self._seed)
if var.dtype == VarDesc.VarType.FP16 or (
var.dtype == VarDesc.VarType.BF16 and not self._uniform):
var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
'out_dtype', var.dtype)
var_tmp._share_underline_tensor_to(var)
else:
out_var._share_underline_tensor_to(var)
return None
else:
if self._uniform:
limit = np.sqrt(6.0 / float(fan_in))
op = block.append_op(
type="uniform_random",
inputs={},
outputs={"Out": out_var},
attrs={
"shape": out_var.shape,
"dtype": int(out_dtype),
"min": -limit,
"max": limit,
"seed": self._seed
},
stop_gradient=True)
else:
std = np.sqrt(2.0 / float(fan_in))
op = block.append_op(
type="gaussian_random",
outputs={"Out": out_var},
attrs={
"shape": out_var.shape,
"dtype": int(out_dtype),
"mean": 0.0,
"std": std,
"seed": self._seed
},
stop_gradient=True)
if var.dtype == VarDesc.VarType.FP16 or (
var.dtype == VarDesc.VarType.BF16 and not self._uniform):
block.append_op(
type="cast",
inputs={"X": out_var},
outputs={"Out": var},
attrs={"in_dtype": out_var.dtype,
"out_dtype": var.dtype})
var.op = op
return op
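# A minimal illustrative sketch, not part of Paddle: the MSRA/Kaiming bounds
# above depend on fan_in only. Hypothetical conv kernel shape, plain numpy.
def _demo_msra_bounds(shape=(64, 32, 3, 3)):
    fan_in = shape[1] * np.prod(shape[2:])
    limit = np.sqrt(6.0 / float(fan_in))  # uniform case: U(-limit, limit)
    std = np.sqrt(2.0 / float(fan_in))    # normal case: N(0, std)
    return limit, std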
class BilinearInitializer(Initializer):
"""
This initializer can be used in transposed convolution operator to
act as upsampling. Users can upsample a feature map with shape of
(B, C, H, W) by any integer factor. The usage is:
Examples:
.. code-block:: python
import math
import paddle
import paddle.nn as nn
from paddle.regularizer import L2Decay
factor = 2
C = 2
B = 8
H = W = 32
w_attr = paddle.ParamAttr(learning_rate=0.,
regularizer=L2Decay(0.),
initializer=nn.initializer.Bilinear())
data = paddle.rand([B, 3, H, W], dtype='float32')
conv_up = nn.Conv2DTranspose(3,
out_channels=C,
kernel_size=2 * factor - factor % 2,
padding=int(
math.ceil((factor - 1) / 2.)),
stride=factor,
weight_attr=w_attr,
bias_attr=False)
x = conv_up(data)
Where, `out_channels=C` and `groups=C` means this is channel-wise transposed
convolution. The filter shape will be (C, 1, K, K) where K is `kernel_size`,
This initializer will set a (K, K) interpolation kernel for every channel
of the filter identically. The resulting shape of the output feature map
will be (B, C, factor * H, factor * W). Note that the learning rate and the
weight decay are set to 0 in order to keep coefficient values of bilinear
interpolation unchanged during training.
"""
def __init__(self):
"""Constructor for BilinearInitializer.
"""
super(BilinearInitializer, self).__init__()
def __call__(self, var, block=None):
"""Initialize the input tensor with Bilinear initialization.
Args:
var(Tensor): Tensor that needs to be initialized.
block(Block, optional): The block in which initialization ops
should be added. Used in static graph only, default None.
Returns:
The initialization op
"""
block = self._check_block(block)
if not isinstance(var, framework.Variable):
raise ValueError("var must be framework.Variable.")
if not isinstance(block, framework.Block):
raise ValueError("block must be framework.Block.")
shape = var.shape
if len(shape) != 4:
raise ValueError("the length of shape must be 4.")
if shape[2] != shape[3]:
raise ValueError("shape[2] must be equal to shape[3].")
weight = np.zeros(np.prod(var.shape), dtype='float32')
size = shape[3]
# factor
f = np.ceil(size / 2.)
# center
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(np.prod(shape)):
x = i % size
y = (i / size) % size
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
weight = np.reshape(weight, shape)
        # to be compatible with fp16 initializers
if var.dtype in [
VarDesc.VarType.FP16, VarDesc.VarType.BF16, VarDesc.VarType.FP64
]:
out_dtype = VarDesc.VarType.FP32
out_var = block.create_var(
name=unique_name.generate(".".join(
['bilinear_init', var.name, 'tmp'])),
shape=var.shape,
dtype=out_dtype,
type=VarDesc.VarType.LOD_TENSOR,
persistable=False)
else:
out_dtype = var.dtype
out_var = var
if out_dtype == VarDesc.VarType.FP32:
value_name = "fp32_values"
values = [float(v) for v in weight.flat]
else:
raise TypeError("Unsupported dtype %s", var.dtype)
if np.prod(shape) > 1024 * 1024:
raise ValueError("The size of input is too big. ")
if framework.in_dygraph_mode():
out_var = _C_ops.assign_value('shape',
list(shape), 'dtype', out_dtype,
value_name, values)
if var.dtype in [
VarDesc.VarType.FP16, VarDesc.VarType.BF16,
VarDesc.VarType.FP64
]:
var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
'out_dtype', var.dtype)
var_tmp._share_underline_tensor_to(var)
else:
out_var._share_underline_tensor_to(var)
return None
else:
op = block.append_op(
type='assign_value',
outputs={'Out': [out_var]},
attrs={
'dtype': out_dtype,
'shape': list(shape),
value_name: values
})
if var.dtype in [
VarDesc.VarType.FP16, VarDesc.VarType.BF16,
VarDesc.VarType.FP64
]:
block.append_op(
type="cast",
inputs={"X": out_var},
outputs={"Out": var},
attrs={"in_dtype": out_var.dtype,
"out_dtype": var.dtype})
var.op = op
return op
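# A minimal illustrative sketch, not part of Paddle: it builds the (K, K)
# bilinear interpolation kernel for a single channel with the same arithmetic as
# BilinearInitializer.__call__ above. K is a hypothetical kernel size.
def _demo_bilinear_kernel(K=4):
    f = np.ceil(K / 2.)
    c = (2 * f - 1 - f % 2) / (2. * f)
    weight = np.zeros(K * K, dtype='float32')
    for i in range(K * K):
        x = i % K
        y = (i // K) % K
        weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
    return weight.reshape(K, K)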
class NumpyArrayInitializer(Initializer):
"""Init an parameter with an numpy array
This op initialize the variable by numpy array.
Args:
value (numpy): numpy array to initialize the variable
Returns:
A Tensor variable initialized by numpy.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
x = fluid.data(name="x", shape=[2, 1], dtype='float32')
fc = fluid.layers.fc(input=x, size=10,
param_attr=fluid.initializer.NumpyArrayInitializer(numpy.array([1,2])))
"""
def __init__(self, value):
import numpy
assert isinstance(value, numpy.ndarray)
super(NumpyArrayInitializer, self).__init__()
self._value = value
def __call__(self, var, block=None):
"""Initialize the input tensor with Numpy array.
Args:
var(Tensor): Tensor that needs to be initialized.
block(Block, optional): The block in which initialization ops
should be added. Used in static graph only, default None.
Returns:
The initialization op
"""
block = self._check_block(block)
assert isinstance(var, framework.Variable)
assert isinstance(block, framework.Block)
        # to be compatible with fp16 initializers
if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
out_dtype = VarDesc.VarType.FP32
np_value = self._value.astype("float32")
out_var = block.create_var(
name=unique_name.generate(".".join(
['numpy_array_init', var.name, 'tmp'])),
shape=var.shape,
dtype=out_dtype,
type=VarDesc.VarType.LOD_TENSOR,
persistable=False)
else:
out_var = var
out_dtype = var.dtype
np_value = self._value
if out_dtype == VarDesc.VarType.FP32:
value_name = "fp32_values"
values = [float(v) for v in np_value.flat]
elif out_dtype == VarDesc.VarType.INT32:
value_name = "int32_values"
values = [int(v) for v in np_value.flat]
else:
raise ValueError("Unsupported dtype %s", self._value.dtype)
if self._value.size > 1024 * 1024 * 1024:
raise ValueError("The size of input is too big. Please consider "
"saving it to file and 'load_op' to load it")
if framework.in_dygraph_mode():
out_var = _C_ops.assign_value('shape',
list(self._value.shape), 'dtype',
out_dtype, value_name, values)
if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
'out_dtype', var.dtype)
var_tmp._share_underline_tensor_to(var)
else:
out_var._share_underline_tensor_to(var)
return None
else:
op = block.append_op(
type='assign_value',
outputs={'Out': out_var},
attrs={
'dtype': out_dtype,
'shape': list(self._value.shape),
value_name: values
},
stop_gradient=True)
if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
block.append_op(
type="cast",
inputs={"X": out_var},
outputs={"Out": var},
attrs={"in_dtype": out_var.dtype,
"out_dtype": var.dtype})
var.op = op
return op
def set_global_initializer(weight_init, bias_init=None):
"""
This API is used to set up global model parameter initializer in framework.
    After this API is invoked, the global initializer will take effect in subsequent code.
The model parameters include ``weight`` and ``bias`` . In the framework, they correspond
to ``paddle.ParamAttr`` , which is inherited from ``paddle.Tensor`` , and is a persistable Variable.
This API only takes effect for model parameters, not for variables created through apis such as
:ref:`api_fluid_layers_create_global_var` , :ref:`api_fluid_layers_create_tensor`.
If the initializer is also set up by ``param_attr`` or ``bias_attr`` when creating a network layer,
the global initializer setting here will not take effect because it has a lower priority.
If you want to cancel the global initializer in framework, please set global initializer to ``None`` .
Args:
weight_init (Initializer): set the global initializer for ``weight`` of model parameters.
bias_init (Initializer, optional): set the global initializer for ``bias`` of model parameters.
Default: None.
Returns:
None
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
nn.initializer.set_global_initializer(nn.initializer.Uniform(), nn.initializer.Constant())
x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
# The weight of conv1 is initialized by Uniform
# The bias of conv1 is initialized by Constant
conv1 = nn.Conv2D(4, 6, (3, 3))
y_var1 = conv1(x_var)
# If set param_attr/bias_attr too, global initializer will not take effect
# The weight of conv2 is initialized by Xavier
# The bias of conv2 is initialized by Normal
conv2 = nn.Conv2D(4, 6, (3, 3),
weight_attr=nn.initializer.XavierUniform(),
bias_attr=nn.initializer.Normal())
y_var2 = conv2(x_var)
            # Cancel the global initializer in framework, it will take effect in subsequent code
nn.initializer.set_global_initializer(None)
"""
check_type(weight_init, 'weight_init', (Initializer, type(None)),
'set_global_initializer')
global _global_weight_initializer_
_global_weight_initializer_ = weight_init
check_type(bias_init, 'bias_init', (Initializer, type(None)),
'set_global_initializer')
global _global_bias_initializer_
_global_bias_initializer_ = bias_init
def _global_weight_initializer():
"""
    Return the global weight initializer. The user doesn't need to use it.
"""
return _global_weight_initializer_
def _global_bias_initializer():
"""
    Return the global bias initializer. The user doesn't need to use it.
"""
return _global_bias_initializer_
def calculate_gain(nonlinearity, param=None):
"""
Get the recommended ``gain`` value of some nonlinearity function. ``gain`` value can be used in some
``paddle.nn.initializer`` api to adjust the initialization value.
Args:
nonlinearity(str): name of nonlinearity activation function. If it is a linear function, such as:
`linear/conv1d/conv2d/conv3d/conv1d_transpose/conv2d_transpose/conv3d_transpose` , 1.0 will be returned.
        param(bool|int|float, optional): optional parameter for some nonlinearity functions. Now, it only applies to
'leaky_relu'. Default: None, it will be calculated as 0.01 in the formula.
Returns:
A float value, which is the recommended gain for this nonlinearity function.
Examples:
.. code-block:: python
import paddle
gain = paddle.nn.initializer.calculate_gain('tanh') # 5.0 / 3
gain = paddle.nn.initializer.calculate_gain('leaky_relu', param=1.0) # 1.0 = math.sqrt(2.0 / (1+param^2))
"""
if param is None:
param = 0.01
else:
assert isinstance(param, (bool, int, float))
param = float(param)
recommended_gain = {
'sigmoid': 1,
'linear': 1,
'conv1d': 1,
'conv2d': 1,
'conv3d': 1,
'conv1d_transpose': 1,
'conv2d_transpose': 1,
'conv3d_transpose': 1,
'tanh': 5.0 / 3,
'relu': math.sqrt(2.0),
'leaky_relu': math.sqrt(2.0 / (1 + param**2)),
'selu': 3.0 / 4
}
if nonlinearity in recommended_gain.keys():
return recommended_gain[nonlinearity]
else:
raise ValueError("nonlinearity function {} is not suppported now.".
format(nonlinearity))
# We short the class name, since users will use the initializer with the package
# name. The sample code:
#
# import paddle.fluid as fluid
#
# hidden = fluid.layers.fc(...,
# param_attr=ParamAttr(fluid.initializer.Xavier()))
#
# There is no need to add an `Initializer` suffix to the class name
Constant = ConstantInitializer
Uniform = UniformInitializer
Normal = NormalInitializer
TruncatedNormal = TruncatedNormalInitializer
Xavier = XavierInitializer
MSRA = MSRAInitializer
Bilinear = BilinearInitializer
|
luotao1/Paddle
|
python/paddle/fluid/initializer.py
|
Python
|
apache-2.0
| 42,276
|
[
"Gaussian"
] |
2bc34e7fe4094e7b6235747a687ef2bd76cff3ea967108cc1e4c9a454a6c6436
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
__version__ = '0.1.3'
from pipes import quote
from subprocess import Popen, PIPE
from datetime import date, timedelta
def count_git_log(range='', paths=None, options=None):
if options is None:
options = {}
options['oneline'] = True
shell_args = []
for k, v in list(options.items()):
if isinstance(v, bool) and v:
shell_args.append('--%s' % k.replace('_', '-'))
elif v:
shell_args.append('--%s=%s' % (k.replace('_', '-'), quote(v)))
if paths:
shell_args.append('-- %s' % paths)
popen = Popen('git log %s %s' %
(range, ' '.join(shell_args)), shell=True, stdout=PIPE)
if popen.wait():
return None
else:
return popen.stdout.read().count('\n')
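# A minimal illustrative sketch, not part of git-count: it shows how an options
# dict is turned into `git log` flags by the loop above. The option values are
# hypothetical and no git repository is needed.
def _demo_shell_args():
    options = {'oneline': True, 'author': 'alice', 'no_merges': True,
               'since': '2015-01-05 00:00:00'}
    shell_args = []
    for k, v in list(options.items()):
        if isinstance(v, bool) and v:
            shell_args.append('--%s' % k.replace('_', '-'))
        elif v:
            shell_args.append('--%s=%s' % (k.replace('_', '-'), quote(v)))
    return shell_args  # e.g. ['--oneline', '--author=alice', '--no-merges', "--since='2015-01-05 00:00:00'"]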
DAY = timedelta(days=1)
WEEK = timedelta(weeks=1)
DATE_FORMAT = '%Y-%m-%d 00:00:00'
def count(author=None, period='weekly', first='monday', number=None, range='', paths=None, not_all=False, merges=False, **options):
'''It counts the commits in a Git repository.
-a, --author=<str> Specify an author.
-p, --period=<str> Specify the period: daily (d), weekly (w), monthly
(m) or yearly (y). Default is weekly.
-f, --first=<str> Specify the first day of weeks: monday (mon), sunday
(sun), saturday (sat). Default is monday.
-n, --number=<int> How many periods?
-r, --range=<str> Specify the range, ex. master..dev.
-t, --paths=<str> Specify the paths, ex. .gitignore.
--not-all Count the commits in current branch only.
--merges Include the merge commits.
The other arguments will be passed to the command, ``git log``.
'''
assert period[0] in 'dwmy', "option 'period' should be daily (d), weekly (w), monthly (m) or yearly (y)"
assert first[:3] in (
'mon', 'sun', 'sat'), "option 'first' should be monday (mon), sunday (sun), saturday (sat)"
today = date.today()
if period.startswith('d'):
until = today+DAY
if not number:
number = 14
elif period.startswith('w'):
until = today - today.weekday()*DAY + WEEK
if first[:3] == 'sun':
until -= DAY
elif first[:3] == 'sat':
until -= 2*DAY
if not number:
number = 8
elif period.startswith('m'):
until = date(
today.year+(today.month+1 > 12),
            (today.month % 12) + 1,
1
)
if not number:
number = 12
elif period.startswith('y'):
until = date(today.year+1, 1, 1)
if not number:
number = 5
options['author'] = author
options['all'] = not not_all
options['no_merges'] = not merges
while number > 0:
if period.startswith('d'):
since = until - DAY
elif period.startswith('w'):
since = until - WEEK
elif period.startswith('m'):
since = date(
until.year-(until.month-1 <= 0),
1 + ((12+(until.month-1)-1) % 12),
1
)
elif period.startswith('y'):
since = date(until.year-1, 1, 1)
options['since'] = since.strftime(DATE_FORMAT)
options['until'] = until.strftime(DATE_FORMAT)
count = count_git_log(range, paths, options)
if count is not None:
print('%s\t%s' % (since, count))
else:
return
until = since
number -= 1
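# A minimal illustrative sketch, not part of git-count: the month rollover used
# in count() above, checked on a couple of hypothetical dates.
def _demo_month_boundaries():
    for today in (date(2015, 11, 15), date(2015, 12, 15)):
        until = date(today.year + (today.month + 1 > 12), (today.month % 12) + 1, 1)
        since = date(
            until.year - (until.month - 1 <= 0),
            1 + ((12 + (until.month - 1) - 1) % 12),
            1
        )
        print(since, until)  # first day of this month, first day of next month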
def main():
try:
import clime
except ImportError:
clime = None
if clime and clime.__version__ >= '0.2':
clime.start({'count': count})
else:
import sys
print('It works better with Clime (>= 0.2). Visit http://clime.mosky.tw/ for more details.', file=sys.stderr)
if len(sys.argv) <= 1:
count()
else:
count(sys.argv[1])
if __name__ == '__main__':
main()
|
moskytw/git-count
|
gitcount.py
|
Python
|
mit
| 4,092
|
[
"VisIt"
] |
3b6c0b6e6d67a975b1f5c0e1f1dc2d8dc419133adcb2f2ad7557b8dacdcbc424
|
"""
TornadoREST is the base class for your RESTful API handlers.
It directly inherits from :py:class:`tornado.web.RequestHandler`
"""
import os
import inspect
from tornado.escape import json_decode
from tornado.web import url as TornadoURL
from urllib.parse import unquote
from functools import partial
from DIRAC import gLogger
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.Core.Tornado.Server.private.BaseRequestHandler import *
sLog = gLogger.getSubLogger(__name__)
# decorator to determine the path to access the target method
location = partial(set_attribute, "location")
location.__doc__ = """
Use this decorator to determine the request path to the target method
Example:
@location('/test/myAPI')
def post_my_method(self, a, b):
''' Usage:
            requests.post(url + '/test/myAPI?a=value1&b=value2', cert=cert).text
'["value1", "value2"]'
'''
return [a, b]
"""
class TornadoREST(BaseRequestHandler): # pylint: disable=abstract-method
"""Base class for all the endpoints handlers.
### Example
In order to create a handler for your service, it has to follow a certain skeleton.
Simple example:
.. code-block:: python
from DIRAC.Core.Tornado.Server.TornadoREST import *
class yourEndpointHandler(TornadoREST):
def get_hello(self, *args, **kwargs):
''' Usage:
                requests.get(url + '/hello/pos_arg1', params=params).json()['args']
['pos_arg1']
'''
return {'args': args, 'kwargs': kwargs}
.. code-block:: python
from diraccfg import CFG
from DIRAC.Core.Utilities.JDL import loadJDLAsCFG, dumpCFGAsJDL
from DIRAC.Core.Tornado.Server.TornadoREST import *
from DIRAC.WorkloadManagementSystem.Client.JobManagerClient import JobManagerClient
from DIRAC.WorkloadManagementSystem.Client.JobMonitoringClient import JobMonitoringClient
class yourEndpointHandler(TornadoREST):
# Specify the default permission for the handler
DEFAULT_AUTHORIZATION = ['authenticated']
# Base URL
DEFAULT_LOCATION = "/"
@classmethod
def initializeHandler(cls, infosDict):
''' Initialization '''
cls.my_requests = 0
cls.j_manager = JobManagerClient()
cls.j_monitor = JobMonitoringClient()
def initializeRequest(self):
''' Called at the beginning of each request '''
self.my_requests += 1
# In the annotation, you can specify the expected value type of the argument
def get_job(self, jobID:int, category=None):
'''Usage:
requests.get(f'https://myserver/job/{jobID}', cert=cert)
requests.get(f'https://myserver/job/{jobID}/owner', cert=cert)
requests.get(f'https://myserver/job/{jobID}/site', cert=cert)
'''
if not category:
return self.j_monitor.getJobStatus(jobID)
if category == 'owner':
return self.j_monitor.getJobOwner(jobID)
                if category == 'site':
return self.j_monitor.getJobSite(jobID)
else:
# TornadoResponse allows you to call tornadoes methods, thread-safe
return TornadoResponse().redirect(f'/job/{jobID}')
def get_jobs(self, owner=None, *, jobGroup=None, jobName=None):
'''Usage:
requests.get(f'https://myserver/jobs', cert=cert)
                requests.get(f'https://myserver/jobs/{owner}?jobGroup=job_group&jobName=job_name', cert=cert)
'''
conditions = {"Owner": owner or self.getRemoteCredentials}
if jobGroup:
conditions["JobGroup"] = jobGroup
if jobName:
conditions["JobName"] = jobName
return self.j_monitor.getJobs(conditions, date)
def post_job(self, manifest):
'''Usage:
requests.post(f'https://myserver/job', cert=cert, json=[{Executable: "/bin/ls"}])
'''
jdl = dumpCFGAsJDL(CFG.CFG().loadFromDict(manifest))
return self.j_manager.submitJob(str(jdl))
def delete_job(self, jobIDs):
'''Usage:
requests.delete(f'https://myserver/job', cert=cert, json=[123, 124])
'''
return self.j_manager.deleteJob(jobIDs)
@authentication(["VISITOR"])
@authorization(["all"])
def options_job(self):
'''Usage:
requests.options(f'https://myserver/job')
'''
return "You use OPTIONS method to access job manager API."
    .. note:: This example aims to show how access interfaces can be implemented, nothing more
This class can read the method annotation to understand what type of argument expects to get the method,
see :py:meth:`_getMethodArgs`.
Note that because we inherit from :py:class:`tornado.web.RequestHandler`
and we are running using executors, the methods you export cannot write
back directly to the client. Please see inline comments in
:py:class:`BaseRequestHandler <DIRAC.Core.Tornado.Server.private.BaseRequestHandler.BaseRequestHandler>` for more details.
"""
# By default we enable all authorization grants, see DIRAC.Core.Tornado.Server.private.BaseRequestHandler for details
DEFAULT_AUTHENTICATION = ["SSL", "JWT", "VISITOR"]
METHOD_PREFIX = None
DEFAULT_LOCATION = "/"
@classmethod
def _pre_initialize(cls) -> list:
"""This method is run by the Tornado server to prepare the handler for launch
this method is run before the server tornado starts for each handler.
it does the following:
- searches for all possible methods for which you need to create routes
- reads their annotation if present
- adds attributes to each target method that help to significantly speed up
the processing of the values of the target method arguments for each query
- prepares mappings between URLs and handlers/method in a clear tornado format
        :returns: a list of URLs (not strings like "https://..." but tornado objects)
see http://www.tornadoweb.org/en/stable/web.html#tornado.web.URLSpec
"""
urls = []
# Look for methods that are exported
for mName in cls.__dict__:
mObj = cls.__dict__[mName]
if cls.METHOD_PREFIX and mName.startswith(cls.METHOD_PREFIX):
# Target methods begin with a prefix defined for all supported http methods,
# e.g.: def export_myMethod(self):
prefix = len(cls.METHOD_PREFIX)
elif _prefix := [
p for p in cls.SUPPORTED_METHODS if mName.startswith(f"{p.lower()}_") # pylint: disable=no-member
]:
# Target methods begin with the name of the http method,
# e.g.: def post_myMethod(self):
prefix = len(_prefix[-1]) + 1
else:
# The name of the target method must contain a special prefix
continue
# if the method exists we will continue
if callable(mObj) and (methodName := mName[prefix:]):
sLog.debug(f" Find {mName} method")
# Find target method URL
url = os.path.join(
cls.DEFAULT_LOCATION, getattr(mObj, "location", "" if methodName == "index" else methodName)
)
if cls.BASE_URL and cls.BASE_URL.strip("/"):
url = cls.BASE_URL.strip("/") + (f"/{url}" if (url := url.strip("/")) else "")
url = f"/{url.strip('/')}/?"
sLog.verbose(f" - Route {url} -> {cls.__name__}.{mName}")
# Discover positional arguments
                mObj.var_kwargs = False  # attribute indicating the presence of ``**kwargs``
args = []
kwargs = {}
# Read signature of a target function to explore arguments and their types
# https://docs.python.org/3/library/inspect.html#inspect.Signature
signature = inspect.signature(mObj)
for name in list(signature.parameters)[1:]: # skip `self` argument
                    # Examine in detail the description of each argument of the target function
                    # to correctly form the route and determine the argument type,
# see https://docs.python.org/3/library/inspect.html#inspect.Parameter
kind = signature.parameters[name].kind # argument type
default = signature.parameters[name].default # argument default value
                    # Determine what type the target function argument expects. By default it's None.
_type = (
# Select the type specified in the target function, if any.
signature.parameters[name].annotation
if signature.parameters[name].annotation is not inspect.Parameter.empty
# If there is no argument annotation, take the default value type, if any
else type(default)
if default is not inspect.Parameter.empty and default is not None
# If you can not determine the type then leave None
else None
)
# Consider separately the positional arguments
if kind in [inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD]:
# register the positional argument type
args.append(_type)
# is argument optional
is_optional = (
kind is inspect.Parameter.POSITIONAL_OR_KEYWORD or default is inspect.Parameter.empty
)
# add to tornado route url regex describing the argument according to the type (if the type is specified)
# only simple types are considered, which should be more than enough
if _type is int:
url += r"(?:/([+-]?\d+)?)?" if is_optional else r"/([+-]?\d+)"
elif _type is float:
url += r"(?:/([+-]?\d*\.?\d+)?)?" if is_optional else r"/([+-]?\d*\.?\d+)"
elif _type is bool:
url += r"(?:/([01]|[A-z]+)?)?" if is_optional else r"/([01]|[A-z]+)"
else:
url += r"(?:/([\w%]+)?)?" if is_optional else r"/([\w%]+)"
# Consider separately the keyword arguments
if kind in [inspect.Parameter.KEYWORD_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD]:
# register the keyword argument type
kwargs[name] = _type
if kind == inspect.Parameter.VAR_KEYWORD:
# if `**kwargs` is available in the target method,
# all additional query arguments will be passed there
mObj.var_kwargs = True
url += r"(?:[?&].+=.+)*"
# We will leave the results of the study here so as not to waste time on each request
mObj.keyword_kwarg_types = kwargs # an attribute that contains types of keyword arguments
mObj.positional_arg_types = args # an attribute that contains types of positional arguments
# We collect all generated tornado url for target handler methods
if url not in urls:
sLog.debug(f" * {url}")
urls.append(TornadoURL(url, cls, dict(method=methodName)))
return urls
@classmethod
def _getComponentInfoDict(cls, fullComponentName: str, fullURL: str) -> dict:
"""Fills the dictionary with information about the current component,
:param fullComponentName: full component name, see :py:meth:`_getFullComponentName`
:param fullURL: incoming request path
"""
return {}
@classmethod
def _getCSAuthorizarionSection(cls, apiName):
"""Search endpoint auth section.
:param str apiName: API name, see :py:meth:`_getFullComponentName`
:return: str
"""
return "%s/Authorization" % PathFinder.getAPISection(apiName)
def _getMethod(self):
"""Get target method function to call. By default we read the first section in the path
following the coincidence with the value of `DEFAULT_LOCATION`.
If such a method is not defined, then try to use the `index` method.
You can also restrict access to a specific method by adding a http method name as a target method prefix::
# Available from any http method specified in SUPPORTED_METHODS class variable
def export_myMethod(self, data):
if self.request.method == 'POST':
# Do your "post job" here
return data
# Available only for POST http method if it specified in SUPPORTED_METHODS class variable
def post_myMethod(self, data):
# Do your "post job" here
return data
:return: function name
"""
prefix = self.METHOD_PREFIX or f"{self.request.method.lower()}_"
# the method key is appended to the URLSpec object when handling the handler in `_pre_initialize`,
# the tornado server passes this argument to `initialize` method.
# Read more about it https://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.initialize
return getattr(self, f"{prefix}{self._init_kwargs['method']}")
def _getMethodArgs(self, args: tuple, kwargs: dict) -> tuple:
"""Search method arguments.
        By default, the expected arguments are taken from the signature of the method itself.
        The arguments received in the request are then matched to them by name.
Usage:
# requests.post(url + "/my_api/pos_only_value", data={'standard': standard_value, 'kwd_only': kwd_only_value}, ..
# requests.post(url + "/my_api", json=[pos_only_value, standard_value, kwd_only_value], ..
@location("/my_api")
def post_note(self, pos_only, /, standard, *, kwd_only):
..
        .. warning:: this means that the target methods cannot be wrapped in a decorator,
                     or, if they are, the decorator must duplicate the arguments and annotations of the target method
:param args: positional arguments that comes from request path
:return: target method args and kwargs
"""
keywordArguments = {}
positionalArguments = []
for i, _type in enumerate(self.methodObj.positional_arg_types[: len(args)]):
if arg := args[i]:
positionalArguments.append(_type(unquote(arg)) if _type else unquote(arg))
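        # If the request body is JSON, a top-level list is appended to the positional
        # arguments and a top-level object is used directly as the keyword arguments.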
if self.request.headers.get("Content-Type") == "application/json":
decoded = json_decode(body) if (body := self.request.body) else []
return (positionalArguments + decoded, {}) if isinstance(decoded, list) else (positionalArguments, decoded)
for name in self.request.arguments:
if name in self.methodObj.keyword_kwarg_types or self.methodObj.var_kwargs:
_type = self.methodObj.keyword_kwarg_types.get(name)
                # Get a list of arguments or one argument according to the type
value = self.get_arguments(name) if _type in (tuple, list, set) else self.get_argument(name)
# Wrap argument with annotated type
keywordArguments[name] = _type(value) if _type else value
return (positionalArguments, keywordArguments)
|
DIRACGrid/DIRAC
|
src/DIRAC/Core/Tornado/Server/TornadoREST.py
|
Python
|
gpl-3.0
| 16,420
|
[
"DIRAC"
] |
bdf44a56829893de55497f7eaed1c1e1f93fe4b95f008d0ad6a9a08f030b2b06
|
"""Environment file for PyFEHM. Set default attribute values."""
"""
Copyright 2013.
Los Alamos National Security, LLC.
This material was produced under U.S. Government contract DE-AC52-06NA25396 for
Los Alamos National Laboratory (LANL), which is operated by Los Alamos National
Security, LLC for the U.S. Department of Energy. The U.S. Government has rights
to use, reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR LOS
ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES
ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified to produce
derivative works, such modified software should be clearly marked, so as not to
confuse it with the version available from LANL.
Additionally, this library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your option)
any later version. Accordingly, this library is distributed in the hope that it
will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General
Public License for more details.
"""
import os,platform,pkgutil
from types import *
floatKeys = ['linear_converge_NRmult_G1','quadratic_converge_NRmult_G2','stop_criteria_NRmult_G3',
'machine_tolerance_TMCH','overrelaxation_factor_OVERF','newton_cycle_tolerance_EPM',
'upstream_weighting_UPWGT','timestep_multiplier_AIAA','min_timestep_DAYMIN','max_timestep_DAYMAX',
'initial_timestep_DAY','max_time_TIMS','initial_year_YEAR','initial_month_MONTH','initial_day_INITTIME',
'init_solute_conc_ANO','implicit_factor_AWC','tolerance_EPC','upstream_weight_UPWGTA','solute_start_DAYCS',
'solute_end_DAYCF','flow_end_DAYHF','flow_start_DAYHS','max_iterations_IACCMX','timestep_multiplier_DAYCM',
'initial_timestep_DAYCMM','max_timestep_DAYCMX','print_interval_NPRTTRC','alpha1_A1ADSF',
'alpha2_A2ADSF','beta_BETADF']
intKeys = ['reduced_dof_IRDOF','reordering_param_ISLORD','IRDOF_param_IBACK','number_SOR_iterations_ICOUPL',
'max_machine_time_RNMAX','max_newton_iterations_MAXIT','number_orthogonalizations_NORTH',
'max_solver_iterations_MAXSOLVE','JA','JB','JC','order_gauss_elim_NAR', 'max_multiply_iterations_IAMM',
'implicitness_factor_AAW','gravity_direction_AGRAV','geometry_ICNL','stor_file_LDA','max_timestep_NSTEP',
'print_interval_IPRTOUT','coupling_NTT','element_integration_INTG','type_IADSF']
boolKeys = ['silent']
strKeys = ['acceleration_method_ACCM']
class fdflt(object):
def __init__(self):
# material properties - these values will be assigned as defaults if not otherwise set
self.permeability = 1.e-15
self.conductivity = 2.2
self.density = 2500.
self.porosity = 0.1
self.specific_heat = 1.e3
self.youngs_modulus = 1.e4 # MPa
self.poissons_ratio = 0.25
self.pressure_coupling = 1.
self.thermal_expansion = 3.e-5 # / K
# initial conditions
self.Pi = 1. # pressure
self.Ti = 30. # temperature
self.Si = 1. # saturation
# output data formats
self.hist_format = 'tec'
self.cont_format = 'surf'
self.parental_cont = True
# set this to the fehm executable to be used if no default assigned
self.fehm_path = 'c:\\path\\to\\fehm\\fehm.exe'
if os.name != 'posix':
self.paraview_path = 'paraview.exe'
else:
self.paraview_path = 'paraview'
if os.name != 'posix':
self.visit_path = 'visit.exe'
else:
self.visit_path = 'visit'
self.lagrit_path = 'c:\\path\\to\\lagrit\\lagrit.exe'
self.files = ['outp','hist','check']
self.co2_interp_path = 'c:\\path\\to\\co2\\co2_interp_table.txt'
self.co2_interp_path_2 = '/alternate/path/to/co2/co2_interp_table.txt'
if not os.path.isfile(self.co2_interp_path):
self.co2_interp_path = self.co2_interp_path_2
# fdata booleans
self.associate = True # associate macro, zone information with nodes
self.sticky_zones = True # print zone definitions immediately before use in input file
self.full_connectivity = True
self.sleep_time = 1.
self.keep_unknown = True # set true if PyFEHM should preserve unknown macros in future output files
self.silent = False # turns off all PyFEHM verbiage
        # default values for macro ITER (parameters controlling solver)
self.iter = {
'linear_converge_NRmult_G1':1.e-5, # convergence criteria
'quadratic_converge_NRmult_G2':1.e-5,
'stop_criteria_NRmult_G3':1.e-3,
'machine_tolerance_TMCH':-1.e-5,
'overrelaxation_factor_OVERF':1.1,
'reduced_dof_IRDOF':0,
'reordering_param_ISLORD':0,
'IRDOF_param_IBACK':0,
'number_SOR_iterations_ICOUPL':0,
'max_machine_time_RNMAX':3600, # number of minutes at which FEHM will cut a simulation
}
# default values for macro CTRL (parameters controlling simulation)
self.ctrl = {
'max_newton_iterations_MAXIT':10, # solver parameters
'newton_cycle_tolerance_EPM':1.e-5, # solver parameters
'number_orthogonalizations_NORTH':8, # solver parameters
'max_solver_iterations_MAXSOLVE':24,
'acceleration_method_ACCM':'gmre',
'JA':1,'JB':0,'JC':0,
'order_gauss_elim_NAR':2,
'implicitness_factor_AAW':1,
'gravity_direction_AGRAV':3, # direction of gravity
'upstream_weighting_UPWGT':1.0,
'max_multiply_iterations_IAMM':7,
'timestep_multiplier_AIAA':1.5, # acceleration, time step multiplier
'min_timestep_DAYMIN':1.e-5, # minimum allowable time step (days)
'max_timestep_DAYMAX':30., # maximum allowable time step (days)
'geometry_ICNL':0, # problem geometry (0 = 3-D)
'stor_file_LDA':0 # flag to use stor file
}
# default values for macro TIME
self.time = {
'initial_timestep_DAY':1., # initial time step size (days)
'max_time_TIMS':365., # maximum simulation time (days)
'max_timestep_NSTEP':200, # maximum number of time steps
'print_interval_IPRTOUT':1, # for printing information to screen
'initial_year_YEAR':None, # initial simulation time (years)
'initial_month_MONTH':None, # (months)
            'initial_day_INITTIME':None        # (days)
}
# default values for macro SOL
self.sol = {
'coupling_NTT':1,
'element_integration_INTG':-1
}
# default values for macro TRAC
self.trac = {
'init_solute_conc_ANO':0.,
'implicit_factor_AWC':1.,
'tolerance_EPC':1.e-7,
'upstream_weight_UPWGTA':0.5,
'solute_start_DAYCS':1.,
'solute_end_DAYCF':2.,
'flow_end_DAYHF':1.,
'flow_start_DAYHS':2.,
'max_iterations_IACCMX':50,
'timestep_multiplier_DAYCM':1.2,
'initial_timestep_DAYCMM':1.,
'max_timestep_DAYCMX':1000.,
'print_interval_NPRTTRC':1.
}
self.adsorption = {
'type_IADSF':None,
'alpha1_A1ADSF':None,
'alpha2_A2ADSF':None,
'beta_BETADF':None
}
# check to see if rc file exist, update defaults
self._check_rc()
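    # Hedged illustration (not from the original file): judging from _check_rc below,
    # a pyfehmrc line is expected to look like either
    #   attribute_name & value                e.g.  permeability & 1.e-16
    # or
    #   dict_name & dict_key & value          e.g.  ctrl & geometry_ICNL & 2
    # with '#' starting a comment.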
def _check_rc(self):
# check if pyfehmrc file exists
rc_lib = pkgutil.get_loader('fdflt').path.split(os.sep)
rc_lib1 = os.sep.join(rc_lib[:-1])+os.sep+'.pyfehmrc'
rc_lib2 = os.sep.join(rc_lib[:-1])+os.sep+'pyfehmrc'
rc_home1 = os.path.expanduser('~')+os.sep+'.pyfehmrc'
rc_home2 = os.path.expanduser('~')+os.sep+'pyfehmrc'
rc_cur1 = os.path.expanduser('.')+os.sep+'.pyfehmrc'
rc_cur2 = os.path.expanduser('.')+os.sep+'pyfehmrc'
if os.path.isfile(rc_cur1): fp = open(rc_cur1)
elif os.path.isfile(rc_cur2): fp = open(rc_cur2)
elif os.path.isfile(rc_home1): fp = open(rc_home1)
elif os.path.isfile(rc_home2): fp = open(rc_home2)
elif os.path.isfile(rc_lib1): fp = open(rc_lib1)
elif os.path.isfile(rc_lib2): fp = open(rc_lib2)
else: return
lns = fp.readlines()
for ln in lns:
ln = ln.split('#')[0] # strip off the comment
if ln.startswith('#'): continue
elif ln.strip() == '': continue
elif '&' in ln:
if len(ln.split('&')) == 2:
self._update_attribute(ln)
elif len(ln.split('&')) == 3:
self._update_dict(ln)
else:
print('WARNING: unrecognized .pyfehmrc line \''+ln.strip()+'\'')
else:
print('WARNING: unrecognized .pyfehmrc line \''+ln.strip()+'\'')
def _update_attribute(self,ln):
name,value = ln.split('&')
name,value = name.strip(), value.strip()
attributelist = list(self.__dict__.keys())
if name not in attributelist:
print('ERROR: no attribute \''+name+'\''); return
if isinstance(self.__dict__[name],dict):
print('ERROR: \''+name+'\' a dictionary. To set a dictionary value supply the dictionary key in format:')
print('dict_name : dict_key : value')
return
# translate None string
if value in ['','None','none']: value = None
if isinstance(self.__dict__[name], bool):
if value in ['True','1','1.']:
self.__setattr__(name,True)
elif value in ['False','0.','0'] or value == None:
self.__setattr__(name,False)
else:
print('ERROR: unrecognized boolean type \''+value+'\''); return
elif isinstance(self.__dict__[name], int):
if value is not None: self.__setattr__(name,int(float(value)))
else: self.__setattr__(name,None)
elif isinstance(self.__dict__[name], float):
if value is not None: self.__setattr__(name,float(value))
else: self.__setattr__(name,None)
elif isinstance(self.__dict__[name], str):
if value is not None: self.__setattr__(name,value)
else: self.__setattr__(name,None)
        elif self.__dict__[name] is None:
if value is not None: self.__setattr__(name,value)
else: self.__setattr__(name,None)
def _update_dict(self,ln):
name,key,value = ln.split('&')
name,key,value = name.strip(), key.strip(), value.strip()
dictlist = [k for k in list(self.__dict__.keys()) if type(self.__dict__[k]) is dict]
if name not in dictlist:
print('ERROR: no dictionary \''+name+'\''); return
keys = list(self.__dict__[name].keys())
if key not in keys:
print('ERROR: no such key \''+key+'\' in dictionary \''+name+'\''); return
# translate None string
if value in ['','None','none']: value = None
if isinstance(self.__dict__[name][key], int):
if value is not None: self.__dict__[name].__setitem__(key,int(float(value)))
            else: self.__dict__[name].__setitem__(key,None)
elif isinstance(self.__dict__[name][key], float):
if value is not None: self.__dict__[name].__setitem__(key,float(value))
            else: self.__dict__[name].__setitem__(key,None)
elif isinstance(self.__dict__[name][key], str):
if value is not None: self.__dict__[name].__setitem__(key,value)
            else: self.__dict__[name].__setitem__(key,None)
        elif self.__dict__[name][key] is None:
if key in strKeys:
if value is not None: self.__dict__[name].__setitem__(key,value)
else: self.__dict__[name].__setitem__(key,None)
elif key in intKeys:
if value is not None: self.__dict__[name].__setitem__(key,int(float(value)))
else: self.__dict__[name].__setitem__(key,None)
elif key in floatKeys:
if value is not None: self.__dict__[name].__setitem__(key,float(value))
else: self.__dict__[name].__setitem__(key,None)
elif key in boolKeys:
if value in ['True','1','1.']:
self.__dict__[name].__setitem__(key,True)
elif value in ['False','0.','0'] or value == None:
self.__dict__[name].__setitem__(key,False)
else: self.__setattr__(name,None)
elif isinstance(self.__dict__[name][key], bool):
if value in ['True','1','1.']:
self.__dict__[name].__setitem__(key,True)
elif value in ['False','0.','0'] or value == None:
self.__dict__[name].__setitem__(key,False)
else:
print('ERROR: unrecognized boolean type \''+value+'\''); return
|
ddempsey/PyFEHM
|
fdflt.py
|
Python
|
lgpl-2.1
| 14,730
|
[
"ParaView",
"VisIt"
] |
434a1583875051054da87240ff28ff7b340d772aab97b881e54dccf2d1fb0b2d
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
url(r'^jet/', include('jet.urls', 'jet')), # Django JET URLS
# User management
url(r'^users/', include('tweeter.users.urls', namespace='users')),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development: just visit
    # these urls in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
|
gwhigs/tweeter
|
config/urls.py
|
Python
|
mit
| 1,666
|
[
"VisIt"
] |
6d086c9744d1e5962832643981958d69f078874b059440085567624c657179b7
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
for response in orm.SurveyQuestionResponse.objects.all():
categories = response.question.categories.splitlines()
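            # A response counts as positive when it matches the first category, or,
            # for questions flagged last_negative, when it is anything but the last category.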
if categories:
if response.question.last_negative:
if response.response != categories[-1]:
response.positive_response = True
else:
if response.response == categories[0]:
response.positive_response = True
response.save()
def backwards(self, orm):
"Write your backwards methods here."
for response in orm.SurveyQuestionResponse.objects.all():
response.positive_response = None
response.save()
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'clinics.clinic': {
'Meta': {'object_name': 'Clinic'},
'code': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'lga_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'pbf_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'ward': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'clinics.clinicstaff': {
'Meta': {'object_name': 'ClinicStaff'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'staff_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'year_started': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
u'clinics.patient': {
'Meta': {'unique_together': "[('clinic', 'serial')]", 'object_name': 'Patient'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'serial': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'clinics.service': {
'Meta': {'object_name': 'Service'},
'code': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'clinics.visit': {
'Meta': {'object_name': 'Visit'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Patient']"}),
'satisfied': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Service']", 'null': 'True', 'blank': 'True'}),
'staff': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.ClinicStaff']", 'null': 'True', 'blank': 'True'}),
'survey_completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'survey_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'survey_started': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'visit_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'welcome_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'rapidsms.contact': {
'Meta': {'object_name': 'Contact'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'survey.displaylabel': {
'Meta': {'object_name': 'DisplayLabel'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'survey.survey': {
'Meta': {'object_name': 'Survey'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'flow_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'max_length': '32'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'survey.surveyquestion': {
'Meta': {'unique_together': "[('survey', 'label')]", 'object_name': 'SurveyQuestion'},
'categories': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'display_label': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.DisplayLabel']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'last_negative': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'question': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'question_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'question_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"})
},
u'survey.surveyquestionresponse': {
'Meta': {'unique_together': "[('visit', 'question')]", 'object_name': 'SurveyQuestionResponse'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'display_on_dashboard': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'positive_response': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.SurveyQuestion']"}),
'response': ('django.db.models.fields.TextField', [], {}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Service']", 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Visit']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['survey']
symmetrical = True
|
myvoice-nigeria/myvoice
|
myvoice/survey/migrations/0014_positive_response.py
|
Python
|
bsd-2-clause
| 13,404
|
[
"VisIt"
] |
afe5993cea71c98b4e4ceaa19a2305a1dd9ab6b55054d167a578439a6dae8197
|
import numpy as np
from ase import Atoms
from gpaw import GPAW, FermiDirac
from gpaw.response.df import DielectricFunction
from gpaw.test import equal, findpeak
GS = 1
ABS = 1
if GS:
cluster = Atoms('Au2', [(0, 0, 0), (0, 0, 2.564)])
cluster.set_cell((6, 6, 6), scale_atoms=False)
cluster.center()
calc = GPAW(mode='pw',
dtype=complex,
xc='RPBE',
nbands=16,
eigensolver='rmm-diis',
occupations=FermiDirac(0.01))
cluster.set_calculator(calc)
cluster.get_potential_energy()
calc.diagonalize_full_hamiltonian(nbands=24, scalapack=True)
calc.write('Au2.gpw', 'all')
if ABS:
df = DielectricFunction('Au2.gpw',
frequencies=np.linspace(0, 14, 141),
hilbert=not True,
eta=0.1,
ecut=10)
b0, b = df.get_dielectric_function(filename=None,
direction='z')
a0, a = df.get_polarizability(filename=None,
direction='z')
a0_ws, a_ws = df.get_polarizability(filename=None,
wigner_seitz_truncation=True,
direction='z')
w0_ = 5.60491055
I0_ = 244.693028
w_ = 5.696528390
I_ = 207.8
w, I = findpeak(np.linspace(0, 14., 141), b0.imag)
equal(w, w0_, 0.05)
equal(6**3 * I / (4 * np.pi), I0_, 0.5)
w, I = findpeak(np.linspace(0, 14., 141), a0.imag)
equal(w, w0_, 0.05)
equal(I, I0_, 0.5)
w, I = findpeak(np.linspace(0, 14., 141), a0_ws.imag)
equal(w, w0_, 0.05)
equal(I, I0_, 0.5)
w, I = findpeak(np.linspace(0, 14., 141), b.imag)
equal(w, w_, 0.05)
equal(6**3 * I / (4 * np.pi), I_, 0.5)
w, I = findpeak(np.linspace(0, 14., 141), a.imag)
equal(w, w_, 0.05)
equal(I, I_, 0.5)
# The Wigner-Seitz truncation does not give exactly the same for small cell
w, I = findpeak(np.linspace(0, 14., 141), a_ws.imag)
equal(w, w_, 0.2)
equal(I, I_, 8.0)
|
robwarm/gpaw-symm
|
gpaw/test/au02_absorption.py
|
Python
|
gpl-3.0
| 2,114
|
[
"ASE",
"GPAW"
] |
6f3138757b0b46fa9e9e3176d2d384ca93041a22f83b16a2f7cbe4be745e517e
|
# $HeadURL$
__RCSID__ = "$Id$"
from socket import socket, AF_INET, SOCK_DGRAM
import struct
import time as time
import datetime
from DIRAC import S_OK, S_ERROR
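# offset in seconds between the NTP epoch (1900-01-01) and the Unix epoch (1970-01-01)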
TIME1970 = 2208988800
gDefaultNTPServers = [ "pool.ntp.org" ]
def getNTPUTCTime( serverList = None, retries = 2 ):
data = '\x1b' + 47 * '\0'
if not serverList:
serverList = gDefaultNTPServers
for server in serverList:
client = socket( AF_INET, SOCK_DGRAM )
client.settimeout( 1 )
worked = False
while retries >= 0 and not worked:
try:
client.sendto( data, ( server, 123 ) )
data, address = client.recvfrom( 1024 )
worked = True
except Exception:
retries -= 1
if not worked:
continue
if data:
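      # word 10 of the 48-byte NTP reply is the transmit-timestamp seconds field;
      # subtracting TIME1970 converts it from the NTP epoch (1900) to the Unix epoch (1970)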
myTime = struct.unpack( '!12I', data )[10]
myTime -= TIME1970
return S_OK( datetime.datetime( *time.gmtime( myTime )[:6] ) )
return S_ERROR( "Could not get NTP time" )
def getClockDeviation( serverList = None ):
result = getNTPUTCTime( serverList )
if not result[ 'OK' ]:
return result
td = datetime.datetime.utcnow() - result[ 'Value' ]
return S_OK( abs( td.days * 86400 + td.seconds ) )
|
fstagni/DIRAC
|
Core/Utilities/NTP.py
|
Python
|
gpl-3.0
| 1,161
|
[
"DIRAC"
] |
173db7b04fc0f039c7d813479d8ad8b71e8b2ca6368e8cebcbf040a44f4dc2a1
|
'''
Arthur Glowacki
APS ANL
10/17/2014
'''
import sys
import vtk
import math
from PyQt4 import QtCore, QtGui
from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
from Scanner import Scanner
from Volumizer import Volumizer
import h5py
from Generator import GenerateWithCubesAndSphereThread
import random, time
import Optics
class MainWindow(QtGui.QMainWindow):
def __init__(self, parent = None):
QtGui.QMainWindow.__init__(self, parent)
self.frame = QtGui.QFrame()
self.scanMutex = QtCore.QMutex()
self.volumizer = Volumizer()
self.volumizer.notifyFinish.connect(self.onFinishVolume)
self.isSceneGenerated = False
self.vl = QtGui.QHBoxLayout()
self.vtkWidget = QVTKRenderWindowInteractor(self.frame)
self.vl.addWidget(self.vtkWidget)
tab_widget = QtGui.QTabWidget()
tab_widget.addTab(self.createGenPropsWidget(), "Generate")
tab_widget.addTab(self.createScanPropsWidget(), "Scan")
tab_widget.addTab(self.createVolumePropsWidget(), "Volume")
self.vl.addWidget(tab_widget)
self.genTask = GenerateWithCubesAndSphereThread()
self.genTask.notifyProgress.connect(self.onGenProgress)
self.genTask.notifyFinish.connect(self.onGenFinish)
self.ren = vtk.vtkRenderer()
self.vtkWidget.GetRenderWindow().AddRenderer(self.ren)
self.iren = self.vtkWidget.GetRenderWindow().GetInteractor()
self.ren.ResetCamera()
self.frame.setLayout(self.vl)
self.setCentralWidget(self.frame)
self.show()
self.iren.Initialize()
def createGridInputWidget(self):
GridStartVal = '2'
hBox = QtGui.QHBoxLayout()
self.GridXIn = QtGui.QLineEdit()
self.GridYIn = QtGui.QLineEdit()
self.GridZIn = QtGui.QLineEdit()
self.GridXIn.setText(GridStartVal)
self.GridYIn.setText(GridStartVal)
self.GridZIn.setText(GridStartVal)
hBox.addWidget(QtGui.QLabel("X"))
hBox.addWidget(self.GridXIn)
hBox.addWidget(QtGui.QLabel("Y"))
hBox.addWidget(self.GridYIn)
hBox.addWidget(QtGui.QLabel("Z"))
hBox.addWidget(self.GridZIn)
gridGroup = QtGui.QGroupBox('Grid Size')
gridGroup.setLayout(hBox)
return gridGroup
def createElementTableWidget(self):
        print 'TODO: create element table widget'
def createGenPropsWidget(self):
DsetStartVal = '1000'
vBox0 = QtGui.QVBoxLayout()
self.BaseScaleStart = QtGui.QLineEdit()
self.BaseScaleEnd = QtGui.QLineEdit()
self.BaseRotateStart = QtGui.QLineEdit()
self.BaseRotateEnd = QtGui.QLineEdit()
self.ElementScaleStart = QtGui.QLineEdit()
self.ElementScaleEnd = QtGui.QLineEdit()
self.ElementsPerFaceIn = QtGui.QLineEdit()
self.NumElementsIn = QtGui.QLineEdit()
self.UseMultiSpheresChk = QtGui.QCheckBox("MultiSphere element models:")
self.BaseScaleStart.setText('4.0')
self.BaseScaleEnd.setText('7.0')
self.BaseRotateStart.setText('0.0')
self.BaseRotateEnd.setText('180.0')
self.ElementScaleStart.setText('0.2')
self.ElementScaleEnd.setText('0.2')
self.ElementsPerFaceIn.setText('1')
self.NumElementsIn.setText('1')
self.UseMultiSpheresChk.setChecked(False)
'''
self.BaseScaleStart.setFixedWidth(32)
self.BaseScaleEnd.setFixedWidth(32)
self.BaseRotateStart.setFixedWidth(32)
self.BaseRotateEnd.setFixedWidth(32)
self.ElementScaleStart.setFixedWidth(32)
self.ElementScaleEnd.setFixedWidth(32)
self.ElementsPerFaceIn.setFixedWidth(32)
self.NumElementsIn.setFixedWidth(32)
'''
baseGroup = QtGui.QGroupBox("Base Material")
vBox1 = QtGui.QVBoxLayout()
hBox0 = QtGui.QHBoxLayout()
hBox0.addWidget(QtGui.QLabel("From:"))
hBox0.addWidget(self.BaseScaleStart)
hBox0.addWidget(QtGui.QLabel("To:"))
hBox0.addWidget(self.BaseScaleEnd)
vBox1.addWidget(QtGui.QLabel("Scale:"))
vBox1.addLayout(hBox0)
hBox1 = QtGui.QHBoxLayout()
hBox1.addWidget(QtGui.QLabel("From:"))
hBox1.addWidget(self.BaseRotateStart)
hBox1.addWidget(QtGui.QLabel("To:"))
hBox1.addWidget(self.BaseRotateEnd)
vBox1.addWidget(QtGui.QLabel("Rotate (degrees):"))
vBox1.addLayout(hBox1)
baseGroup.setLayout(vBox1)
elementGroup = QtGui.QGroupBox("Element Material")
vBox2 = QtGui.QVBoxLayout()
hBox2 = QtGui.QHBoxLayout()
hBox2.addWidget(QtGui.QLabel("Num of different elements:"))
hBox2.addWidget(self.NumElementsIn)
hBox2.addWidget(QtGui.QLabel("Num Per Suface:"))
hBox2.addWidget(self.ElementsPerFaceIn)
vBox2.addLayout(hBox2)
#vBox2.addWidget(self.UseMultiSpheresChk)
hBox3 = QtGui.QHBoxLayout()
hBox3.addWidget(QtGui.QLabel("From:"))
hBox3.addWidget(self.ElementScaleStart)
hBox3.addWidget(QtGui.QLabel("To:"))
hBox3.addWidget(self.ElementScaleEnd)
vBox2.addWidget(QtGui.QLabel("Scale:"))
vBox2.addLayout(hBox3)
elementGroup.setLayout(vBox2)
self.btnGenScan = QtGui.QPushButton('Generate')
self.btnGenScan.clicked.connect(self.generateScan)
self.genProgressBar = QtGui.QProgressBar(self)
self.genProgressBar.setRange(0,100)
vBox0.addWidget(self.createGridInputWidget())
vBox0.addWidget(baseGroup)
vBox0.addWidget(elementGroup)
vBox0.addWidget(self.genProgressBar)
vBox0.addWidget(self.btnGenScan)
self.genGroup = QtGui.QGroupBox("Generate Properties")
self.genGroup.setLayout(vBox0)
return self.genGroup
def createScanTypeWidget(self):
        print 'TODO: create scan type widget'
def createDatasetWidget(self):
DsetStartVal = '1000'
hBox = QtGui.QHBoxLayout()
self.DsetXIn = QtGui.QLineEdit()
self.DsetYIn = QtGui.QLineEdit()
self.DsetXIn.setText(DsetStartVal)
self.DsetYIn.setText(DsetStartVal)
hBox.addWidget(QtGui.QLabel("Width"))
hBox.addWidget(self.DsetXIn)
hBox.addWidget(QtGui.QLabel("Height"))
hBox.addWidget(self.DsetYIn)
datasetGroup = QtGui.QGroupBox("Dataset Size")
datasetGroup.setLayout(hBox)
return datasetGroup
def createVolDatasetWidget(self):
DsetStartVal = '1000'
hBox = QtGui.QHBoxLayout()
self.volDsetXIn = QtGui.QLineEdit()
self.volDsetYIn = QtGui.QLineEdit()
self.volDsetZIn = QtGui.QLineEdit()
self.volDsetXIn.setText(DsetStartVal)
self.volDsetYIn.setText(DsetStartVal)
self.volDsetZIn.setText(DsetStartVal)
hBox.addWidget(QtGui.QLabel("Width"))
hBox.addWidget(self.volDsetXIn)
hBox.addWidget(QtGui.QLabel("Height"))
hBox.addWidget(self.volDsetYIn)
hBox.addWidget(QtGui.QLabel("Depth"))
hBox.addWidget(self.volDsetZIn)
datasetGroup = QtGui.QGroupBox("Volume Size")
datasetGroup.setLayout(hBox)
return datasetGroup
def createTomoScanWidget(self):
self.NumImagesIn = QtGui.QLineEdit()
self.StartRotIn = QtGui.QLineEdit()
self.StopRotIn = QtGui.QLineEdit()
hBox1 = QtGui.QHBoxLayout()
hBox2 = QtGui.QHBoxLayout()
vBox = QtGui.QVBoxLayout()
self.NumImagesIn.setText('100')
self.StartRotIn.setText('0.0')
self.StopRotIn.setText('180.0')
hBox1.addWidget(QtGui.QLabel('Number Of Images:'))
hBox1.addWidget(self.NumImagesIn)
        hBox2.addWidget(QtGui.QLabel('Start Rotation (degrees):'))
hBox2.addWidget(self.StartRotIn)
hBox2.addWidget(QtGui.QLabel('Stop Rotation:'))
hBox2.addWidget(self.StopRotIn)
vBox.addLayout(hBox1)
vBox.addLayout(hBox2)
tomoGroup = QtGui.QGroupBox("Tomo Scan")
tomoGroup.setLayout(vBox)
return tomoGroup
def createLensPropsWidget(self):
vBox0 = QtGui.QVBoxLayout()
hBox1 = QtGui.QHBoxLayout()
self.deltaNMIn = QtGui.QLineEdit()
self.deltaNMIn.setText('1.0')
hBox1.addWidget(QtGui.QLabel('Delta nm:'))
hBox1.addWidget(self.deltaNMIn)
vBox0.addLayout(hBox1)
self.combo = QtGui.QComboBox()
self.combo.addItem("Coherent")
self.combo.addItem("Incoherent")
vBox0.addWidget(self.combo)
vBox1 = QtGui.QVBoxLayout()
self.UseObj1Chk = QtGui.QCheckBox("Use")
self.UseObj1Chk.setChecked(True)
vBox1.addWidget(self.UseObj1Chk)
vBox1.addWidget(QtGui.QLabel('Outer nm:'))
self.outNM1In = QtGui.QLineEdit()
self.outNM1In.setText('4.0')
vBox1.addWidget(self.outNM1In)
self.numPhotons1In = QtGui.QLineEdit()
self.numPhotons1In.setText('1.0')
vBox1.addWidget(QtGui.QLabel('# Photons'))
vBox1.addWidget(self.numPhotons1In)
vBox2 = QtGui.QVBoxLayout()
self.UseObj2Chk = QtGui.QCheckBox("Use")
self.UseObj2Chk.setChecked(True)
vBox2.addWidget(self.UseObj2Chk)
vBox2.addWidget(QtGui.QLabel('Outer nm:'))
self.outNM2In = QtGui.QLineEdit()
self.outNM2In.setText('30.0')
vBox2.addWidget(self.outNM2In)
self.numPhotons2In = QtGui.QLineEdit()
self.numPhotons2In.setText('1.0')
vBox2.addWidget(QtGui.QLabel('# Photons'))
vBox2.addWidget(self.numPhotons2In)
vBox3 = QtGui.QVBoxLayout()
self.UseObj3Chk = QtGui.QCheckBox("Use")
self.UseObj3Chk.setChecked(True)
vBox3.addWidget(self.UseObj3Chk)
vBox3.addWidget(QtGui.QLabel('Outer nm:'))
self.outNM3In = QtGui.QLineEdit()
self.outNM3In.setText('100.0')
vBox3.addWidget(self.outNM3In)
self.numPhotons3In = QtGui.QLineEdit()
self.numPhotons3In.setText('1.0')
vBox3.addWidget(QtGui.QLabel('# Photons'))
vBox3.addWidget(self.numPhotons3In)
hBox0 = QtGui.QHBoxLayout()
hBox0.addLayout(vBox0)
hBox0.addLayout(vBox1)
hBox0.addLayout(vBox2)
hBox0.addLayout(vBox3)
group = QtGui.QGroupBox("Objectives")
group.setLayout(hBox0)
return group
def createScanPropsWidget(self):
self.btnStartScan = QtGui.QPushButton('Start Scan')
self.btnStartScan.clicked.connect(self.runScan)
self.btnStopScan = QtGui.QPushButton('Stop Scan')
self.btnStopScan.clicked.connect(self.stopScan)
hBox3 = QtGui.QHBoxLayout()
self.fileNameIn = QtGui.QLineEdit()
self.fileNameIn.setText('TestScan.h5')
hBox3.addWidget(QtGui.QLabel('FileName:'))
hBox3.addWidget(self.fileNameIn)
self.scanProgressBar = QtGui.QProgressBar(self)
self.scanProgressBar.setRange(0,100)
hBox2 = QtGui.QHBoxLayout()
hBox2.addWidget(self.btnStartScan)
hBox2.addWidget(self.btnStopScan)
vBox = QtGui.QVBoxLayout()
vBox.addLayout(hBox3)
vBox.addWidget(self.createDatasetWidget())
vBox.addWidget(self.createLensPropsWidget())
vBox.addWidget(self.createTomoScanWidget())
vBox.addWidget(self.scanProgressBar)
vBox.addLayout(hBox2)
self.scanGroup = QtGui.QGroupBox("Scan Properties")
self.scanGroup.setLayout(vBox)
self.scanGroup.setEnabled(False)
return self.scanGroup
def createVolumePropsWidget(self):
self.btnStartVolume = QtGui.QPushButton('Export Volume')
self.btnStartVolume.clicked.connect(self.runVolumizer)
self.btnStopVolume = QtGui.QPushButton('Stop')
#self.btnStopVolume.clicked.connect(self.stopScan)
hBox3 = QtGui.QHBoxLayout()
self.volFileNameIn = QtGui.QLineEdit()
self.volFileNameIn.setText('Volume.h5')
hBox3.addWidget(QtGui.QLabel('FileName:'))
hBox3.addWidget(self.volFileNameIn)
#self.volProgressBar = QtGui.QProgressBar(self)
#self.volProgressBar.setRange(0,100)
hBox2 = QtGui.QHBoxLayout()
hBox2.addWidget(self.btnStartVolume)
#hBox2.addWidget(self.btnStopVolume)
vBox = QtGui.QVBoxLayout()
vBox.addLayout(hBox3)
vBox.addWidget(self.createVolDatasetWidget())
#vBox.addWidget(self.volProgressBar)
vBox.addLayout(hBox2)
self.volGroup = QtGui.QGroupBox("Volume Properties")
self.volGroup.setLayout(vBox)
self.volGroup.setEnabled(False)
return self.volGroup
def addElementActors(self):
print 'TODO: add actors'
def removeElementActors(self):
print 'TODO: remove actors'
def clearScene(self):
if self.isSceneGenerated:
print 'Override current scene?'
for mList in self.allModelList:
for m in mList:
self.ren.RemoveActor(m.actor)
del m
self.allModelList = []
self.iren.Render()
def onScanProgress(self, i):
self.scanMutex.lock()
v = self.scanProgressBar.value()
self.scanProgressBar.setValue(v+1)
self.scanMutex.unlock()
def onScanFinish(self):
#if all finished then save file
self.scanMutex.lock()
self.finishedScans += 1
if self.finishedScans >= len(self.allModelList):
self.hfile.close()
for i in range(len(self.hdfFiles)):
self.hdfFiles[i].close()
del self.mutex
del self.hfile
for s in self.scanners:
del s
self.genGroup.setEnabled(True)
self.volGroup.setEnabled(True)
self.btnStartScan.setEnabled(True)
print 'Scan finished in ',int(time.time() - self.startScanTime),' seconds'
self.scanMutex.unlock()
def onGenProgress(self, i):
self.genProgressBar.setValue(i)
def onGenFinish(self, allModelList, bounds):
for mList in allModelList:
for m in mList:
self.ren.AddActor(m.actor)
self.sceneBounds = bounds
self.allModelList = allModelList
self.ren.ResetCamera()
self.iren.Render()
self.isSceneGenerated = True
self.btnGenScan.setEnabled(True)
self.scanGroup.setEnabled(True)
self.volGroup.setEnabled(True)
print 'Finished generating scene'
def generateScan(self):
self.btnGenScan.setEnabled(False)
self.scanGroup.setEnabled(False)
self.volGroup.setEnabled(False)
self.genTask.gridX = int(self.GridXIn.text())
self.genTask.gridY = int(self.GridYIn.text())
self.genTask.gridZ = int(self.GridZIn.text())
self.genTask.numElements = int(self.NumElementsIn.text())
self.genTask.startBaseScale = float(self.BaseScaleStart.text())
self.genTask.endBaseScale = float(self.BaseScaleEnd.text())
self.genTask.startBaseRotate = float(self.BaseRotateStart.text())
self.genTask.endBaseRotate = float(self.BaseRotateEnd.text())
self.genTask.elementsPerFace = int(self.ElementsPerFaceIn.text())
self.genTask.startElementScale = float(self.ElementScaleStart.text())
self.genTask.endElementScale = float(self.ElementScaleEnd.text())
self.genTask.useMultiSphereElement = self.UseMultiSpheresChk.isChecked()
self.clearScene()
#print 'generating scene with grid size',self.gridX, self.gridY, self.gridZ
self.genProgressBar.setRange(0, self.genTask.gridX * self.genTask.gridY * self.genTask.gridZ)
self.genProgressBar.setValue(0)
#self.generateWithCubesAndSpheres()
self.genTask.start()
def stopScan(self):
print 'Trying to stop the scan'
for s in self.scanners:
s.Stop = True
def onFinishVolume(self):
self.btnStartVolume.setEnabled(True)
self.genGroup.setEnabled(True)
self.scanGroup.setEnabled(True)
def runVolumizer(self):
self.genGroup.setEnabled(False)
self.scanGroup.setEnabled(False)
self.btnStartVolume.setEnabled(False)
self.volumizer.bounds = self.sceneBounds
self.volumizer.dimX = int(self.volDsetXIn.text())
self.volumizer.dimY = int(self.volDsetYIn.text())
self.volumizer.dimZ = int(self.volDsetZIn.text())
self.volumizer.filename = str(self.volFileNameIn.text())
self.volumizer.allModelList = self.allModelList
self.volumizer.start()
def runScan(self):
self.startScanTime = time.time()
dimX = int(self.DsetXIn.text())
dimY = int(self.DsetYIn.text())
numImages = int(self.NumImagesIn.text())
startRot = float(self.StartRotIn.text())
stopRot = float(self.StopRotIn.text())
#scene
if self.isSceneGenerated:
self.genGroup.setEnabled(False)
self.volGroup.setEnabled(False)
self.btnStartScan.setEnabled(False)
scanCount = len(self.allModelList)
#create hdf5 file
filename = str(self.fileNameIn.text())
datasetNames = ['exchange/data']
for i in range(scanCount - 1):
datasetNames += ['exchange/element'+str(i)]
self.hfile = h5py.File(filename, 'w')
self.scanProgressBar.setRange(0, numImages * scanCount )
self.scanProgressBar.setValue(0)
self.finishedScans = 0
self.mutex = QtCore.QMutex()
self.hdfFiles = []
fileAddOn = ''
calcFunc = Optics.coherent
if self.combo.currentIndex() == 0:
print 'coherent'
fileAddOn = '_co'
calcFunc = Optics.coherent
else:
print 'incoherent'
fileAddOn = '_inc'
calcFunc = Optics.incoherent
delta_obj_nm = float(self.deltaNMIn.text())
max_freq = 1.0 / 2.e-3 * delta_obj_nm
objectives = []
if self.UseObj1Chk.isChecked():
obj = Optics.Objective()
val1 = float(self.outNM1In.text())
obj.generate(max_freq, dimX, dimY, val1, 500.0, True)
photons = str(self.numPhotons1In.text())
obj.numPhotons = float(photons)
objectives += [ obj ]
self.hdfFiles += [ h5py.File(filename +fileAddOn+ '_lens'+str(val1)+'_ph'+photons+'.h5', 'w') ]
if self.UseObj2Chk.isChecked():
obj = Optics.Objective()
val1 = float(self.outNM2In.text())
obj.generate(max_freq, dimX, dimY, val1, 500.0, True)
photons = str(self.numPhotons2In.text())
obj.numPhotons = float(photons)
objectives += [ obj ]
self.hdfFiles += [ h5py.File(filename +fileAddOn+ '_lens'+str(val1)+'_ph'+photons+'.h5', 'w') ]
if self.UseObj3Chk.isChecked():
obj = Optics.Objective()
val1 = float(self.outNM3In.text())
obj.generate(max_freq, dimX, dimY, val1, 500.0, True)
photons = str(self.numPhotons3In.text())
obj.numPhotons = float(photons)
objectives += [ obj ]
self.hdfFiles += [ h5py.File(filename +fileAddOn+ '_lens'+str(val1)+'_ph'+photons+'.h5', 'w') ]
self.scanners = []
for i in range(scanCount):
self.scanners += [Scanner()]
self.scanners[i].objectives = objectives
self.scanners[i].calcFunc = calcFunc
self.scanners[i].hdfFiles = self.hdfFiles
self.scanners[i].dsetLock = self.mutex
self.scanners[i].hfile = self.hfile
self.scanners[i].datasetName = datasetNames[i]
self.scanners[i].baseModels = self.allModelList[i]
self.scanners[i].bounds = self.sceneBounds
self.scanners[i].dimX = dimX
self.scanners[i].dimY = dimY
self.scanners[i].startRot = startRot
self.scanners[i].stopRot = stopRot
self.scanners[i].numImages = numImages
self.scanners[i].notifyProgress.connect(self.onScanProgress)
self.scanners[i].notifyFinish.connect(self.onScanFinish)
#We only want the first scanner to save theta
self.scanners[0].bSaveTheta = True
self.scanners[i].start()
else:
print 'Please generate a scene first'
|
aglowacki/ScanSimulator
|
MainWindow.py
|
Python
|
gpl-2.0
| 17,635
|
[
"VTK"
] |
5bf2d02bad8db54def53ce370d722d5e04e74657861f18cd61af5222e5ef7075
|
from math import floor
from world import World
import queue
import socketserver
import datetime
import random
import re
import requests
import sqlite3
import sys
import threading
import time
import traceback
DEFAULT_HOST = '0.0.0.0'
DEFAULT_PORT = 4080
DB_PATH = 'craft.db'
LOG_PATH = 'log.txt'
CHUNK_SIZE = 32
BUFFER_SIZE = 4096
COMMIT_INTERVAL = 5
AUTH_REQUIRED = True
AUTH_URL = 'https://craft.michaelfogleman.com/api/1/access'
DAY_LENGTH = 600
SPAWN_POINT = (0, 0, 0, 0, 0)
RATE_LIMIT = False
RECORD_HISTORY = False
INDESTRUCTIBLE_ITEMS = set([16])
ALLOWED_ITEMS = set([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
17, 18, 19, 20, 21, 22, 23,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63])
AUTHENTICATE = 'A'
BLOCK = 'B'
CHUNK = 'C'
DISCONNECT = 'D'
KEY = 'K'
LIGHT = 'L'
NICK = 'N'
POSITION = 'P'
REDRAW = 'R'
SIGN = 'S'
TALK = 'T'
TIME = 'E'
VERSION = 'V'
YOU = 'U'
try:
from config import *
except ImportError:
pass
def log(*args):
now = datetime.datetime.utcnow()
line = ' '.join(map(str, (now,) + args))
print(line)
with open(LOG_PATH, 'a') as fp:
fp.write('%s\n' % line)
def chunked(x):
return int(floor(round(x) / CHUNK_SIZE))
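# Maps a world coordinate to its chunk index, e.g. with CHUNK_SIZE 32 an integer
# x in [0, 31] maps to chunk 0 and x in [-32, -1] maps to chunk -1.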
def packet(*args):
return '%s\n' % ','.join(map(str, args))
class RateLimiter(object):
def __init__(self, rate, per):
self.rate = float(rate)
self.per = float(per)
self.allowance = self.rate
self.last_check = time.time()
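    # Token-bucket limiter: tick() refills the allowance at `rate` tokens per `per`
    # seconds and returns True (too fast) once a client has used up its allowance.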
def tick(self):
if not RATE_LIMIT:
return False
now = time.time()
elapsed = now - self.last_check
self.last_check = now
self.allowance += elapsed * (self.rate / self.per)
if self.allowance > self.rate:
self.allowance = self.rate
if self.allowance < 1:
return True # too fast
else:
self.allowance -= 1
return False # okay
class Server(socketserver.ThreadingMixIn, socketserver.TCPServer):
allow_reuse_address = True
daemon_threads = True
class Handler(socketserver.BaseRequestHandler):
def setup(self):
self.position_limiter = RateLimiter(100, 5)
self.limiter = RateLimiter(1000, 10)
self.version = None
self.client_id = None
self.user_id = None
self.nick = None
self.queue = queue.Queue()
self.running = True
self.start()
def handle(self):
model = self.server.model
model.enqueue(model.on_connect, self)
try:
buf = []
while True:
data = self.request.recv(BUFFER_SIZE)
if not data:
break
buf.extend(data.decode().replace('\r\n', '\n'))
while '\n' in buf:
index = buf.index('\n')
line = ''.join(buf[:index])
buf = buf[index + 1:]
if not line:
continue
if line[0] == POSITION:
if self.position_limiter.tick():
log('RATE', self.client_id)
self.stop()
return
else:
if self.limiter.tick():
log('RATE', self.client_id)
self.stop()
return
model.enqueue(model.on_data, self, line)
finally:
model.enqueue(model.on_disconnect, self)
def finish(self):
self.running = False
def stop(self):
self.request.close()
def start(self):
thread = threading.Thread(target=self.run)
thread.setDaemon(True)
thread.start()
def run(self):
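        # Drain the outgoing queue in batches so that several queued packets are
        # coalesced into a single sendall() call per wake-up.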
while self.running:
try:
buf = []
try:
buf.append(self.queue.get(timeout=5))
try:
while True:
buf.append(self.queue.get(False))
except queue.Empty:
pass
except queue.Empty:
continue
data = ''.join(buf)
self.request.sendall(data.encode())
except Exception:
self.request.close()
raise
def send_raw(self, data):
if data:
self.queue.put(data)
def send(self, *args):
self.send_raw(packet(*args))
class Model(object):
def __init__(self, seed):
self.world = World(seed)
self.clients = []
self.queue = queue.Queue()
self.commands = {
AUTHENTICATE: self.on_authenticate,
CHUNK: self.on_chunk,
BLOCK: self.on_block,
LIGHT: self.on_light,
POSITION: self.on_position,
TALK: self.on_talk,
SIGN: self.on_sign,
VERSION: self.on_version,
}
self.patterns = [
(re.compile(r'^/nick(?:\s+([^,\s]+))?$'), self.on_nick),
(re.compile(r'^/spawn$'), self.on_spawn),
(re.compile(r'^/goto(?:\s+(\S+))?$'), self.on_goto),
(re.compile(r'^/pq\s+(-?[0-9]+)\s*,?\s*(-?[0-9]+)$'), self.on_pq),
(re.compile(r'^/help(?:\s+(\S+))?$'), self.on_help),
(re.compile(r'^/list$'), self.on_list),
]
def start(self):
thread = threading.Thread(target=self.run)
thread.setDaemon(True)
thread.start()
def run(self):
self.connection = sqlite3.connect(DB_PATH)
self.create_tables()
self.commit()
while True:
try:
if time.time() - self.last_commit > COMMIT_INTERVAL:
self.commit()
self.dequeue()
except Exception:
traceback.print_exc()
def enqueue(self, func, *args, **kwargs):
self.queue.put((func, args, kwargs))
def dequeue(self):
try:
func, args, kwargs = self.queue.get(timeout=5)
func(*args, **kwargs)
except queue.Empty:
pass
def execute(self, *args, **kwargs):
return self.connection.execute(*args, **kwargs)
def commit(self):
self.last_commit = time.time()
self.connection.commit()
def create_tables(self):
queries = [
'create table if not exists block ('
' p int not null,'
' q int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' w int not null'
');',
'create unique index if not exists block_pqxyz_idx on '
' block (p, q, x, y, z);',
'create table if not exists light ('
' p int not null,'
' q int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' w int not null'
');',
'create unique index if not exists light_pqxyz_idx on '
' light (p, q, x, y, z);',
'create table if not exists sign ('
' p int not null,'
' q int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' face int not null,'
' text text not null'
');',
'create index if not exists sign_pq_idx on sign (p, q);',
'create unique index if not exists sign_xyzface_idx on '
' sign (x, y, z, face);',
'create table if not exists block_history ('
' timestamp real not null,'
' user_id int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' w int not null'
');',
]
for query in queries:
self.execute(query)
def get_default_block(self, x, y, z):
p, q = chunked(x), chunked(z)
chunk = self.world.get_chunk(p, q)
return chunk.get((x, y, z), 0)
def get_block(self, x, y, z):
query = (
'select w from block where '
'p = :p and q = :q and x = :x and y = :y and z = :z;'
)
p, q = chunked(x), chunked(z)
rows = list(self.execute(query, dict(p=p, q=q, x=x, y=y, z=z)))
if rows:
return rows[0][0]
return self.get_default_block(x, y, z)
def next_client_id(self):
result = 1
client_ids = set(x.client_id for x in self.clients)
while result in client_ids:
result += 1
return result
def on_connect(self, client):
client.client_id = self.next_client_id()
client.nick = 'guest%d' % client.client_id
log('CONN', client.client_id, *client.client_address)
client.position = SPAWN_POINT
self.clients.append(client)
client.send(YOU, client.client_id, *client.position)
client.send(TIME, time.time(), DAY_LENGTH)
client.send(TALK, 'Welcome to Craft!')
client.send(TALK, 'Type "/help" for a list of commands.')
self.send_position(client)
self.send_positions(client)
self.send_nick(client)
self.send_nicks(client)
def on_data(self, client, data):
#log('RECV', client.client_id, data)
args = data.split(',')
command, args = args[0], args[1:]
if command in self.commands:
func = self.commands[command]
func(client, *args)
def on_disconnect(self, client):
log('DISC', client.client_id, *client.client_address)
self.clients.remove(client)
self.send_disconnect(client)
self.send_talk('%s has disconnected from the server.' % client.nick)
def on_version(self, client, version):
if client.version is not None:
return
version = int(version)
if version != 1:
client.stop()
return
client.version = version
# TODO: client.start() here
def on_authenticate(self, client, username, access_token):
user_id = None
if username and access_token:
payload = {
'username': username,
'access_token': access_token,
}
response = requests.post(AUTH_URL, data=payload)
if response.status_code == 200 and response.text.isdigit():
user_id = int(response.text)
client.user_id = user_id
if user_id is None:
client.nick = 'guest%d' % client.client_id
client.send(TALK, 'Visit craft.michaelfogleman.com to register!')
else:
client.nick = username
self.send_nick(client)
        # TODO: send a 'has left' message if the client was already authenticated
self.send_talk('%s has joined the game.' % client.nick)
def on_chunk(self, client, p, q, key=0):
packets = []
p, q, key = list(map(int, (p, q, key)))
query = (
'select rowid, x, y, z, w from block where '
'p = :p and q = :q and rowid > :key;'
)
rows = self.execute(query, dict(p=p, q=q, key=key))
max_rowid = 0
blocks = 0
for rowid, x, y, z, w in rows:
blocks += 1
packets.append(packet(BLOCK, p, q, x, y, z, w))
max_rowid = max(max_rowid, rowid)
query = (
'select x, y, z, w from light where '
'p = :p and q = :q;'
)
rows = self.execute(query, dict(p=p, q=q))
lights = 0
for x, y, z, w in rows:
lights += 1
packets.append(packet(LIGHT, p, q, x, y, z, w))
query = (
'select x, y, z, face, text from sign where '
'p = :p and q = :q;'
)
rows = self.execute(query, dict(p=p, q=q))
signs = 0
for x, y, z, face, text in rows:
signs += 1
packets.append(packet(SIGN, p, q, x, y, z, face, text))
if blocks:
packets.append(packet(KEY, p, q, max_rowid))
if blocks or lights or signs:
packets.append(packet(REDRAW, p, q))
packets.append(packet(CHUNK, p, q))
client.send_raw(''.join(packets))
def on_block(self, client, x, y, z, w):
x, y, z, w = list(map(int, (x, y, z, w)))
p, q = chunked(x), chunked(z)
previous = self.get_block(x, y, z)
message = None
if AUTH_REQUIRED and client.user_id is None:
message = 'Only logged in users are allowed to build.'
elif y <= 0 or y > 255:
message = 'Invalid block coordinates.'
elif w not in ALLOWED_ITEMS:
message = 'That item is not allowed.'
elif w and previous:
message = 'Cannot create blocks in a non-empty space.'
elif not w and not previous:
message = 'That space is already empty.'
elif previous in INDESTRUCTIBLE_ITEMS:
message = 'Cannot destroy that type of block.'
if message is not None:
client.send(BLOCK, p, q, x, y, z, previous)
client.send(REDRAW, p, q)
client.send(TALK, message)
return
query = (
'insert into block_history (timestamp, user_id, x, y, z, w) '
'values (:timestamp, :user_id, :x, :y, :z, :w);'
)
if RECORD_HISTORY:
self.execute(query, dict(timestamp=time.time(),
user_id=client.user_id, x=x, y=y, z=z, w=w))
query = (
'insert or replace into block (p, q, x, y, z, w) '
'values (:p, :q, :x, :y, :z, :w);'
)
self.execute(query, dict(p=p, q=q, x=x, y=y, z=z, w=w))
self.send_block(client, p, q, x, y, z, w)
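        # Blocks on a chunk border are also recorded under the neighbouring
        # chunks with a negated w value, so that clients holding those chunks
        # know about the block; the sign marks these rows as edge copies rather
        # than the authoritative record kept under the block's own chunk.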
for dx in range(-1, 2):
for dz in range(-1, 2):
if dx == 0 and dz == 0:
continue
if dx and chunked(x + dx) == p:
continue
if dz and chunked(z + dz) == q:
continue
np, nq = p + dx, q + dz
self.execute(query, dict(p=np, q=nq, x=x, y=y, z=z, w=-w))
self.send_block(client, np, nq, x, y, z, -w)
if w == 0:
query = (
'delete from sign where '
'x = :x and y = :y and z = :z;'
)
self.execute(query, dict(x=x, y=y, z=z))
query = (
'update light set w = 0 where '
'x = :x and y = :y and z = :z;'
)
self.execute(query, dict(x=x, y=y, z=z))
def on_light(self, client, x, y, z, w):
x, y, z, w = list(map(int, (x, y, z, w)))
p, q = chunked(x), chunked(z)
block = self.get_block(x, y, z)
message = None
if AUTH_REQUIRED and client.user_id is None:
message = 'Only logged in users are allowed to build.'
elif block == 0:
message = 'Lights must be placed on a block.'
elif w < 0 or w > 15:
message = 'Invalid light value.'
if message is not None:
# TODO: client.send(LIGHT, p, q, x, y, z, previous)
client.send(REDRAW, p, q)
client.send(TALK, message)
return
query = (
'insert or replace into light (p, q, x, y, z, w) '
'values (:p, :q, :x, :y, :z, :w);'
)
self.execute(query, dict(p=p, q=q, x=x, y=y, z=z, w=w))
self.send_light(client, p, q, x, y, z, w)
def on_sign(self, client, x, y, z, face, *args):
if AUTH_REQUIRED and client.user_id is None:
client.send(TALK, 'Only logged in users are allowed to build.')
return
text = ','.join(args)
x, y, z, face = list(map(int, (x, y, z, face)))
if y <= 0 or y > 255:
return
if face < 0 or face > 7:
return
if len(text) > 48:
return
p, q = chunked(x), chunked(z)
if text:
query = (
'insert or replace into sign (p, q, x, y, z, face, text) '
'values (:p, :q, :x, :y, :z, :face, :text);'
)
self.execute(query,
dict(p=p, q=q, x=x, y=y, z=z, face=face, text=text))
else:
query = (
'delete from sign where '
'x = :x and y = :y and z = :z and face = :face;'
)
self.execute(query, dict(x=x, y=y, z=z, face=face))
self.send_sign(client, p, q, x, y, z, face, text)
def on_position(self, client, x, y, z, rx, ry):
x, y, z, rx, ry = list(map(float, (x, y, z, rx, ry)))
client.position = (x, y, z, rx, ry)
self.send_position(client)
def on_talk(self, client, *args):
text = ','.join(args)
if text.startswith('/'):
for pattern, func in self.patterns:
match = pattern.match(text)
if match:
func(client, *match.groups())
break
else:
client.send(TALK, 'Unrecognized command: "%s"' % text)
elif text.startswith('@'):
nick = text[1:].split(' ', 1)[0]
for other in self.clients:
if other.nick == nick:
client.send(TALK, '%s> %s' % (client.nick, text))
other.send(TALK, '%s> %s' % (client.nick, text))
break
else:
client.send(TALK, 'Unrecognized nick: "%s"' % nick)
else:
self.send_talk('%s> %s' % (client.nick, text))
def on_nick(self, client, nick=None):
if AUTH_REQUIRED:
client.send(TALK, 'You cannot change your nick on this server.')
return
if nick is None:
client.send(TALK, 'Your nickname is %s' % client.nick)
else:
self.send_talk('%s is now known as %s' % (client.nick, nick))
client.nick = nick
self.send_nick(client)
def on_spawn(self, client):
client.position = SPAWN_POINT
client.send(YOU, client.client_id, *client.position)
self.send_position(client)
def on_goto(self, client, nick=None):
if nick is None:
clients = [x for x in self.clients if x != client]
other = random.choice(clients) if clients else None
else:
nicks = dict((client.nick, client) for client in self.clients)
other = nicks.get(nick)
if other:
client.position = other.position
client.send(YOU, client.client_id, *client.position)
self.send_position(client)
def on_pq(self, client, p, q):
p, q = list(map(int, (p, q)))
if abs(p) > 1000 or abs(q) > 1000:
return
client.position = (p * CHUNK_SIZE, 0, q * CHUNK_SIZE, 0, 0)
client.send(YOU, client.client_id, *client.position)
self.send_position(client)
def on_help(self, client, topic=None):
if topic is None:
client.send(TALK, 'Type "t" to chat. Type "/" to type commands:')
client.send(TALK, '/goto [NAME], /help [TOPIC], /list, /login NAME, /logout, /nick')
client.send(TALK, '/offline [FILE], /online HOST [PORT], /pq P Q, /spawn, /view N')
return
topic = topic.lower().strip()
if topic == 'goto':
client.send(TALK, 'Help: /goto [NAME]')
client.send(TALK, 'Teleport to another user.')
client.send(TALK, 'If NAME is unspecified, a random user is chosen.')
elif topic == 'list':
client.send(TALK, 'Help: /list')
client.send(TALK, 'Display a list of connected users.')
elif topic == 'login':
client.send(TALK, 'Help: /login NAME')
client.send(TALK, 'Switch to another registered username.')
client.send(TALK, 'The login server will be re-contacted. The username is case-sensitive.')
elif topic == 'logout':
client.send(TALK, 'Help: /logout')
client.send(TALK, 'Unauthenticate and become a guest user.')
client.send(TALK, 'Automatic logins will not occur again until the /login command is re-issued.')
elif topic == 'offline':
client.send(TALK, 'Help: /offline [FILE]')
client.send(TALK, 'Switch to offline mode.')
client.send(TALK, 'FILE specifies the save file to use and defaults to "craft".')
elif topic == 'online':
client.send(TALK, 'Help: /online HOST [PORT]')
client.send(TALK, 'Connect to the specified server.')
elif topic == 'nick':
client.send(TALK, 'Help: /nick [NICK]')
client.send(TALK, 'Get or set your nickname.')
elif topic == 'pq':
client.send(TALK, 'Help: /pq P Q')
client.send(TALK, 'Teleport to the specified chunk.')
elif topic == 'spawn':
client.send(TALK, 'Help: /spawn')
client.send(TALK, 'Teleport back to the spawn point.')
elif topic == 'view':
client.send(TALK, 'Help: /view N')
client.send(TALK, 'Set viewing distance, 1 - 24.')
def on_list(self, client):
client.send(TALK,
'Players: %s' % ', '.join(x.nick for x in self.clients))
def send_positions(self, client):
for other in self.clients:
if other == client:
continue
client.send(POSITION, other.client_id, *other.position)
def send_position(self, client):
for other in self.clients:
if other == client:
continue
other.send(POSITION, client.client_id, *client.position)
def send_nicks(self, client):
for other in self.clients:
if other == client:
continue
client.send(NICK, other.client_id, other.nick)
def send_nick(self, client):
for other in self.clients:
other.send(NICK, client.client_id, client.nick)
def send_disconnect(self, client):
for other in self.clients:
if other == client:
continue
other.send(DISCONNECT, client.client_id)
def send_block(self, client, p, q, x, y, z, w):
for other in self.clients:
if other == client:
continue
other.send(BLOCK, p, q, x, y, z, w)
other.send(REDRAW, p, q)
def send_light(self, client, p, q, x, y, z, w):
for other in self.clients:
if other == client:
continue
other.send(LIGHT, p, q, x, y, z, w)
other.send(REDRAW, p, q)
def send_sign(self, client, p, q, x, y, z, face, text):
for other in self.clients:
if other == client:
continue
other.send(SIGN, p, q, x, y, z, face, text)
def send_talk(self, text):
log(text)
for client in self.clients:
client.send(TALK, text)
def cleanup():
world = World(None)
conn = sqlite3.connect(DB_PATH)
query = 'select x, y, z from block order by rowid desc limit 1;'
last = list(conn.execute(query))[0]
query = 'select distinct p, q from block;'
chunks = list(conn.execute(query))
count = 0
total = 0
delete_query = 'delete from block where x = %d and y = %d and z = %d;'
print('begin;')
for p, q in chunks:
chunk = world.create_chunk(p, q)
query = 'select x, y, z, w from block where p = :p and q = :q;'
rows = conn.execute(query, {'p': p, 'q': q})
for x, y, z, w in rows:
if chunked(x) != p or chunked(z) != q:
continue
total += 1
if (x, y, z) == last:
continue
original = chunk.get((x, y, z), 0)
if w == original or original in INDESTRUCTIBLE_ITEMS:
count += 1
print(delete_query % (x, y, z))
conn.close()
print('commit;')
print('%d of %d blocks will be cleaned up' % (count, total), file=sys.stderr)
def main():
if len(sys.argv) == 2 and sys.argv[1] == 'cleanup':
cleanup()
return
host, port = DEFAULT_HOST, DEFAULT_PORT
if len(sys.argv) > 1:
host = sys.argv[1]
if len(sys.argv) > 2:
port = int(sys.argv[2])
log('SERV', host, port)
model = Model(None)
model.start()
server = Server((host, port), Handler)
server.model = model
server.serve_forever()
if __name__ == '__main__':
main()
|
a101010/Craft
|
server.py
|
Python
|
mit
| 24,788
|
[
"VisIt"
] |
b6fd7fb78399ecbda1f0b2b88346c92acdda84299d63ff321a00b596715607a9
|
"""
A simple implementation of the Kalman Filter, Kalman Smoother, and EM
algorithm for Linear-Gaussian state space models.
Primarily adapted from Daniel Duckworth's pykalman library.
"""
import warnings
import numpy as np
import numpy.random
from numpy import shape, zeros, outer, dot, array, all, asarray
from scipy import linalg
# Simple Utility functions
def array1d(X, dtype=None, order=None):
"""Returns at least 1-d array with data from X"""
return asarray(np.atleast_1d(X), dtype=dtype, order=order)
def array2d(X, dtype=None, order=None):
"""Returns at least 2-d array with data from X"""
return asarray(np.atleast_2d(X), dtype=dtype, order=order)
def _determine_dimensionality(variables, default):
"""Derive the dimensionality of the state space
Parameters
----------
variables : list of ({None, array}, conversion function, index)
variables, functions to convert them to arrays, and indices in those
arrays to derive dimensionality from.
default : {None, int}
default dimensionality to return if variables is empty
Returns
-------
dim : int
dimensionality of state space as derived from variables or default.
"""
# gather possible values based on the variables
candidates = []
for (v, converter, idx) in variables:
if v is not None:
v = converter(v)
candidates.append(v.shape[idx])
# also use the manually specified default
if default is not None:
candidates.append(default)
# ensure consistency of all derived values
if len(candidates) == 0:
return 1
else:
if not all(array(candidates) == candidates[0]):
raise ValueError(
"The shape of all " +
"parameters is not consistent. " +
"Please re-check their values."
)
return candidates[0]
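# Illustrative helper (the name is hypothetical and unused elsewhere): both
# candidates below imply a 2-dimensional state, so _determine_dimensionality
# returns 2; inconsistent shapes would raise ValueError instead.
def _dimensionality_example():
    return _determine_dimensionality(
        [(np.eye(2), array2d, -2), (np.zeros(2), array1d, -1)], None)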
class KalmanFilter(object):
""" Implements Kalman Filter, Kalman Smoother, and EM algorithm for
linear Gaussian models
"""
def __init__(self, transition_matrix=None, observation_matrix=None,
transition_covariance=None, observation_covariance=None,
transition_offset=None, observation_offset=None,
initial_state_mean=None, initial_state_covariance=None,
em_vars=['transition_matrix', 'transition_covariance',
'observation_matrix', 'observation_covariance',
'initial_state_mean', 'initial_state_covariance'],
n_dim_state=None, n_dim_obs=None):
n_dim_state = _determine_dimensionality(
[(transition_matrix, array2d, -2),
(transition_offset, array1d, -1),
(transition_covariance, array2d, -2),
(initial_state_mean, array1d, -1),
(initial_state_covariance, array2d, -2),
(observation_matrix, array2d, -1)],
n_dim_state
)
n_dim_obs = _determine_dimensionality(
[(observation_matrix, array2d, -2),
(observation_offset, array1d, -1),
(observation_covariance, array2d, -2)],
n_dim_obs
)
# Save the input matrices
self.transition_matrix = transition_matrix
self.observation_matrix = observation_matrix
self.transition_covariance = transition_covariance
self.observation_covariance = observation_covariance
self.transition_offset = transition_offset
self.observation_offset = observation_offset
self.initial_state_mean = initial_state_mean
self.initial_state_covariance = initial_state_covariance
self.em_vars = em_vars
self.n_dim_state = n_dim_state
self.n_dim_obs = n_dim_obs
def sample(self, n_timesteps, initial_state=None):
""" Sample a state sequence"""
transition_matrix = self.transition_matrix
transition_offset = self.transition_offset
transition_covariance = self.transition_covariance
observation_matrix = self.observation_matrix
observation_offset = self.observation_offset
observation_covariance = self.observation_covariance
initial_state_mean = self.initial_state_mean
initial_state_covariance = self.initial_state_covariance
n_dim_state = self.n_dim_state
n_dim_obs = self.n_dim_obs
states = zeros((n_timesteps, n_dim_state))
observations = zeros((n_timesteps, n_dim_obs))
# Sample initial state
if initial_state is None:
initial_state = numpy.random.multivariate_normal(
initial_state_mean, initial_state_covariance)
# Generate the samples
for t in range(n_timesteps):
if t == 0:
states[t] = initial_state
else:
states[t] = dot(transition_matrix, states[t - 1]) + \
transition_offset + \
numpy.random.multivariate_normal(
zeros(n_dim_state),
transition_covariance)
observations[t] = dot(observation_matrix, states[t]) +\
observation_offset + \
numpy.random.multivariate_normal(zeros(n_dim_obs),
observation_covariance)
return states, observations
def filter(self, observations):
"""Perform the Kalman filter
Parameters
__________
observations : observations corresponding to times [0...n_timesteps-1]
Returns
_______
filtered_state_means
filtered_state_covariances
"""
transition_matrix = self.transition_matrix
transition_offset = self.transition_offset
transition_covariance = self.transition_covariance
observation_matrix = self.observation_matrix
observation_offset = self.observation_offset
observation_covariance = self.observation_covariance
initial_state_mean = self.initial_state_mean
initial_state_covariance = self.initial_state_covariance
n_timesteps = observations.shape[0]
n_dim_state = self.n_dim_state
n_dim_obs = self.n_dim_obs
predicted_state_means = zeros((n_timesteps, n_dim_state))
predicted_state_covariances = zeros((n_timesteps, n_dim_state,
n_dim_state))
kalman_gains = zeros((n_timesteps, n_dim_state, n_dim_obs))
filtered_state_means = zeros((n_timesteps, n_dim_state))
filtered_state_covariances = zeros((n_timesteps, n_dim_state,
n_dim_state))
for t in range(n_timesteps):
if t == 0:
predicted_state_means[t] = initial_state_mean
predicted_state_covariances[t] = initial_state_covariance
else:
predicted_state_means[t], predicted_state_covariances[t] = \
self._filter_predict(transition_matrix, transition_covariance,
transition_offset, filtered_state_means[t-1],
filtered_state_covariances[t-1])
(kalman_gains[t], filtered_state_means[t],
filtered_state_covariances[t]) = self._filter_correct(
observation_matrix, observation_covariance,
observation_offset, predicted_state_means[t],
predicted_state_covariances[t], observations[t])
return (predicted_state_means, predicted_state_covariances,
kalman_gains, filtered_state_means, filtered_state_covariances)
def _filter_predict(self, transition_matrix, transition_covariance,
transition_offset, current_state_mean, current_state_covariance):
"""Perform the forward prediction step of the kalman filter."""
predicted_state_mean = dot(transition_matrix, current_state_mean) +\
transition_offset
predicted_state_covariance = dot(transition_matrix,
dot(current_state_covariance, transition_matrix.T)) +\
transition_covariance
return (predicted_state_mean, predicted_state_covariance)
def _filter_correct(self, observation_matrix, observation_covariance,
observation_offset, predicted_state_mean, predicted_state_covariance,
observation):
"""Perform the correctino for the current evidence"""
predicted_observation_mean = dot(observation_matrix,
predicted_state_mean) + observation_offset
predicted_observation_covariance = dot(observation_matrix,
dot(predicted_state_covariance, observation_matrix.T)) +\
observation_covariance
kalman_gain = dot(predicted_state_covariance,
dot(observation_matrix.T,
linalg.pinv(predicted_observation_covariance)))
corrected_state_mean = predicted_state_mean + \
dot(kalman_gain, observation - predicted_observation_mean)
corrected_state_covariance = predicted_state_covariance -\
dot(kalman_gain, dot(observation_matrix,
predicted_state_covariance))
return (kalman_gain, corrected_state_mean, corrected_state_covariance)
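    # For reference, the two steps above are the standard Kalman recursions with
    # A = transition_matrix, b = transition_offset, Q = transition_covariance,
    # C = observation_matrix, d = observation_offset, R = observation_covariance:
    #   predict:  x_t|t-1 = A x_t-1|t-1 + b,   P_t|t-1 = A P_t-1|t-1 A^T + Q
    #   correct:  K_t = P_t|t-1 C^T (C P_t|t-1 C^T + R)^+
    #             x_t|t = x_t|t-1 + K_t (y_t - C x_t|t-1 - d)
    #             P_t|t = P_t|t-1 - K_t C P_t|t-1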
def smooth(self, observations):
"""Apply the Kalman Smoother"""
transition_matrix = self.transition_matrix
transition_offset = self.transition_offset
transition_covariance = self.transition_covariance
observation_matrix = self.observation_matrix
observation_offset = self.observation_offset
observation_covariance = self.observation_covariance
initial_state_mean = self.initial_state_mean
initial_state_covariance = self.initial_state_covariance
(predicted_state_means, predicted_state_covariances, _,
filtered_state_means, filtered_state_covariances) = \
self.filter(observations)
n_timesteps, n_dim_state = shape(filtered_state_means)
smoothed_state_means = zeros((n_timesteps, n_dim_state))
smoothed_state_covariances = zeros((n_timesteps, n_dim_state,
n_dim_state))
kalman_smoothing_gains = zeros((n_timesteps-1, n_dim_state,
n_dim_state))
smoothed_state_means[-1] = filtered_state_means[-1]
smoothed_state_covariances[-1] = filtered_state_covariances[-1]
for t in reversed(range(n_timesteps-1)):
(smoothed_state_means[t], smoothed_state_covariances[t],
kalman_smoothing_gains[t]) = self._smooth_update(
transition_matrix, filtered_state_means[t],
filtered_state_covariances[t], predicted_state_means[t+1],
predicted_state_covariances[t+1], smoothed_state_means[t+1],
smoothed_state_covariances[t+1])
return (smoothed_state_means, smoothed_state_covariances,
kalman_smoothing_gains)
def _smooth_update(self, transition_matrix, filtered_state_mean,
filtered_state_covariance, predicted_state_mean,
predicted_state_covariance, next_smoothed_state_mean,
next_smoothed_state_covariance):
"""Perform the backwards smoothing update"""
kalman_smoothing_gain = dot(filtered_state_covariance,
dot(transition_matrix.T,
linalg.pinv(predicted_state_covariance)))
smoothed_state_mean = filtered_state_mean +\
dot(kalman_smoothing_gain,
next_smoothed_state_mean - predicted_state_mean)
smoothed_state_covariance = filtered_state_covariance + dot(
kalman_smoothing_gain, dot(
next_smoothed_state_covariance - predicted_state_covariance,
kalman_smoothing_gain.T))
return (smoothed_state_mean, smoothed_state_covariance,
kalman_smoothing_gain)
def _smooth_pair(self,smoothed_state_covariances, kalman_smoothing_gain):
n_timesteps, n_dim_state, _ = smoothed_state_covariances.shape
pairwise_covariances = zeros((n_timesteps, n_dim_state,
n_dim_state))
for t in range(1, n_timesteps):
pairwise_covariances[t] = (dot(smoothed_state_covariances[t],
kalman_smoothing_gain[t-1].T))
return pairwise_covariances
def em(self, observations, n_iter=10):
# EM iterations
for i in range(n_iter):
(smoothed_state_means, smoothed_state_covariances,
kalman_smoothing_gains) = self.smooth(observations)
sigma_pair_smooth = self._smooth_pair(smoothed_state_covariances,
kalman_smoothing_gains)
(self.transition_matrix, self.observation_matrix,
self.transition_offset, self.observation_offset,
self.transition_covariance, self.observation_covariance,
self.initial_state_mean, self.initial_state_covariance) =\
self._em(observations, self.transition_offset,
self.observation_offset, smoothed_state_means,
smoothed_state_covariances, sigma_pair_smooth)
def _em(self, observations, transition_offset, observation_offset,
smoothed_state_means, smoothed_state_covariances,
pairwise_covariances):
observation_matrix = self._em_observation_matrix(observations,
observation_offset, smoothed_state_means,
smoothed_state_covariances)
        observation_covariance = self._em_observation_covariance(observations,
            observation_offset, observation_matrix,
            smoothed_state_means, smoothed_state_covariances)
transition_matrix = self._em_transition_matrix(transition_offset,
smoothed_state_means, smoothed_state_covariances,
pairwise_covariances)
transition_covariance = self._em_transition_covariance(
transition_matrix, transition_offset, smoothed_state_means,
smoothed_state_covariances, pairwise_covariances)
initial_state_mean = self._em_initial_state_mean(smoothed_state_means)
initial_state_covariance = self._em_initial_state_covariance(
initial_state_mean, smoothed_state_means,
smoothed_state_covariances)
transition_offset = self._em_transition_offset(transition_matrix,
smoothed_state_means)
observation_offset = self._em_observation_offset(observation_matrix,
smoothed_state_means, observations)
return (transition_matrix, observation_matrix,
transition_offset, observation_offset,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance)
def _em_observation_matrix(self, observations, observation_offset,
smoothed_state_means, smoothed_state_covariances):
n_dim_state = self.n_dim_state
n_dim_obs = self.n_dim_obs
n_timesteps = observations.shape[0]
res1 = zeros((n_dim_obs, n_dim_state))
res2 = zeros((n_dim_state, n_dim_state))
for t in range(n_timesteps):
res1 += outer(observations[t] - observation_offset,
smoothed_state_means[t])
res2 += smoothed_state_covariances[t] + outer(
smoothed_state_means[t], smoothed_state_means[t])
return dot(res1, linalg.pinv(res2))
    def _em_observation_covariance(self, observations, observation_offset,
            observation_matrix, smoothed_state_means,
            smoothed_state_covariances):
        n_dim_obs = self.n_dim_obs
        n_timesteps = observations.shape[0]
        res = zeros((n_dim_obs, n_dim_obs))
        for t in range(n_timesteps):
            # residual of the observation model: y_t - C x_t - d
            err = observations[t] - dot(observation_matrix,
                smoothed_state_means[t]) - observation_offset
            res += outer(err, err) + dot(observation_matrix,
                dot(smoothed_state_covariances[t], observation_matrix.T))
        return (1.0 / n_timesteps) * res
def _em_transition_matrix(self, transition_offset, smoothed_state_means,
smoothed_state_covariances, pairwise_covariances):
n_timesteps, n_dim_state, _ = smoothed_state_covariances.shape
res1 = zeros((n_dim_state, n_dim_state))
res2 = zeros((n_dim_state, n_dim_state))
for t in range(1, n_timesteps):
res1 += pairwise_covariances[t] +\
outer(smoothed_state_means[t], smoothed_state_means[t-1]) -\
outer(transition_offset, smoothed_state_means[t-1])
res2 += smoothed_state_covariances[t-1] +\
outer(smoothed_state_means[t-1], smoothed_state_means[t-1])
return dot(res1, linalg.pinv(res2))
def _em_transition_covariance(self, transition_matrix, transition_offset,
smoothed_state_means, smoothed_state_covariances,
pairwise_covariances):
n_timesteps, n_dim_state, _ = smoothed_state_covariances.shape
res = zeros((n_dim_state, n_dim_state))
for t in range(n_timesteps - 1):
err = (smoothed_state_means[t + 1]
- dot(transition_matrix, smoothed_state_means[t])
- transition_offset)
Vt1t_A = (dot(pairwise_covariances[t + 1],
transition_matrix.T))
res += (outer(err, err) + dot(transition_matrix,
dot(smoothed_state_covariances[t],
transition_matrix.T))
+ smoothed_state_covariances[t + 1]
- Vt1t_A - Vt1t_A.T)
return (1.0 / (n_timesteps - 1)) * res
def _em_initial_state_mean(self, smoothed_state_means):
return smoothed_state_means[0]
def _em_initial_state_covariance(self, initial_state_mean,
smoothed_state_means, smoothed_state_covariances):
x0 = smoothed_state_means[0]
x0_x0 = smoothed_state_covariances[0] + outer(x0, x0)
return (x0_x0 - outer(initial_state_mean, x0)
- outer(x0, initial_state_mean)
+ outer(initial_state_mean, initial_state_mean))
def _em_transition_offset(self, transition_matrix,
smoothed_state_means):
n_timesteps, n_dim_state = smoothed_state_means.shape
transition_offset = zeros(n_dim_state)
for t in range(1, n_timesteps):
transition_offset += (smoothed_state_means[t]
- dot(transition_matrix, smoothed_state_means[t - 1]))
if n_timesteps > 1:
return (1.0 / (n_timesteps - 1)) * transition_offset
else:
return zeros(n_dim_state)
def _em_observation_offset(self, observation_matrix,
smoothed_state_means, observations):
n_timesteps, n_dim_obs = observations.shape
observation_offset = zeros(n_dim_obs)
for t in range(n_timesteps):
observation_offset += (observations[t]
- np.dot(observation_matrix, smoothed_state_means[t]))
if n_timesteps > 0:
return (1.0 / n_timesteps) * observation_offset
else:
return observation_offset
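# Illustrative usage sketch; the helper name and the parameter values below are
# assumptions chosen only to demonstrate the API defined above (a 1-D random walk).
def _demo_kalman_filter():
    kf = KalmanFilter(
        transition_matrix=np.array([[1.0]]),
        observation_matrix=np.array([[1.0]]),
        transition_covariance=np.array([[0.1]]),
        observation_covariance=np.array([[1.0]]),
        transition_offset=np.array([0.0]),
        observation_offset=np.array([0.0]),
        initial_state_mean=np.array([0.0]),
        initial_state_covariance=np.array([[1.0]]))
    states, observations = kf.sample(50)
    # filter() returns predicted and filtered moments plus the Kalman gains
    _, _, _, filtered_means, filtered_covs = kf.filter(observations)
    smoothed_means, smoothed_covs, _ = kf.smooth(observations)
    kf.em(observations, n_iter=5)  # refines the model parameters in place
    return filtered_means, smoothed_means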
|
rbharath/switch
|
Switch/simple_kalman.py
|
Python
|
bsd-2-clause
| 17,759
|
[
"Gaussian"
] |
1588cf544ddba034d0252ec0698d1cc8a38ff2a393b01f9d5715181730bab379
|
# Copyright 2010-2011 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Provides code to access the TogoWS integrated websevices of DBCLS, Japan.
This module aims to make the TogoWS (from DBCLS, Japan) easier to use. See:
http://togows.dbcls.jp/
The TogoWS REST service provides simple access to a range of databases, acting
as a proxy to shield you from all the different provider APIs. This works using
simple URLs (which this module will construct for you). For more details, see
http://togows.dbcls.jp/site/en/rest.html
The functionality is somewhat similar to Biopython's Bio.Entrez module which
provides access to the NCBI's Entrez Utilities (E-Utils) which also covers a
wide range of databases.
Currently TogoWS does not provide any usage guidelines (unlike the NCBI whose
requirements are reasonably clear). To avoid risking overloading the service,
Biopython will only allow three calls per second.
The TogoWS SOAP service offers a more complex API for calling web services
(essentially calling remote functions) provided by DDBJ, KEGG and PDBj. For
example, this allows you to run a remote BLAST search at the DDBJ. This is
not yet covered by this module, however there are lots of Python examples
on the TogoWS website using the SOAPpy python library. See:
http://togows.dbcls.jp/site/en/soap.html
http://soapy.sourceforge.net/
"""
from __future__ import print_function
import sys
# Add path to Bio
sys.path.append('../..')
import time
from Bio._py3k import _binary_to_string_handle, _as_bytes
# Importing these functions with leading underscore as not intended for reuse
from Bio._py3k import urlopen as _urlopen
from Bio._py3k import quote as _quote
__docformat__ = "restructuredtext en"
# Constant
_BASE_URL = "http://togows.dbcls.jp"
# Caches:
_search_db_names = None
_entry_db_names = None
_entry_db_fields = {}
_entry_db_formats = {}
_convert_formats = []
def _get_fields(url):
"""Queries a TogoWS URL for a plain text list of values (PRIVATE)."""
handle = _open(url)
fields = handle.read().strip().split()
handle.close()
return fields
def _get_entry_dbs():
return _get_fields(_BASE_URL + "/entry")
def _get_entry_fields(db):
return _get_fields(_BASE_URL + "/entry/%s?fields" % db)
def _get_entry_formats(db):
return _get_fields(_BASE_URL + "/entry/%s?formats" % db)
def _get_convert_formats():
return [pair.split(".") for pair in
_get_fields(_BASE_URL + "/convert/")]
def entry(db, id, format=None, field=None):
"""TogoWS fetch entry (returns a handle).
- db - database (string), see list below.
    - id - identifier (string) or a list of identifiers (either as a list of
strings or a single string with comma separators).
- format - return data file format (string), options depend on the database
e.g. "xml", "json", "gff", "fasta", "ttl" (RDF Turtle)
- field - specific field from within the database record (string)
e.g. "au" or "authors" for pubmed.
At the time of writing, this includes the following::
KEGG: compound, drug, enzyme, genes, glycan, orthology, reaction,
module, pathway
    DDBJ: ddbj, dad, pdb
    NCBI: nuccore, nucest, nucgss, nucleotide, protein, gene, omim,
    homologene, snp, mesh, pubmed
EBI: embl, uniprot, uniparc, uniref100, uniref90, uniref50
For the current list, please see http://togows.dbcls.jp/entry/
This function is essentially equivalent to the NCBI Entrez service
EFetch, available in Biopython as Bio.Entrez.efetch(...), but that
does not offer field extraction.
"""
    global _entry_db_names, _entry_db_fields, _entry_db_formats
if _entry_db_names is None:
_entry_db_names = _get_entry_dbs()
if db not in _entry_db_names:
raise ValueError("TogoWS entry fetch does not officially support "
"database '%s'." % db)
if field:
try:
fields = _entry_db_fields[db]
except KeyError:
fields = _get_entry_fields(db)
_entry_db_fields[db] = fields
if db == "pubmed" and field == "ti" and "title" in fields:
# Backwards compatibility fix for TogoWS change Nov/Dec 2013
field = "title"
import warnings
warnings.warn("TogoWS dropped 'pubmed' field alias 'ti', please use 'title' instead.")
if field not in fields:
raise ValueError("TogoWS entry fetch does not explicitly support "
"field '%s' for database '%s'. Only: %s"
% (field, db, ", ".join(sorted(fields))))
if format:
try:
formats = _entry_db_formats[db]
except KeyError:
formats = _get_entry_formats(db)
_entry_db_formats[db] = formats
if format not in formats:
raise ValueError("TogoWS entry fetch does not explicitly support "
"format '%s' for database '%s'. Only: %s"
% (format, db, ", ".join(sorted(formats))))
if isinstance(id, list):
id = ",".join(id)
url = _BASE_URL + "/entry/%s/%s" % (db, _quote(id))
if field:
url += "/" + field
if format:
url += "." + format
return _open(url)
def search_count(db, query):
"""TogoWS search count (returns an integer).
db - database (string), see http://togows.dbcls.jp/search
query - search term (string)
You could then use the count to download a large set of search results in
batches using the offset and limit options to Bio.TogoWS.search(). In
general however the Bio.TogoWS.search_iter() function is simpler to use.
"""
global _search_db_names
if _search_db_names is None:
_search_db_names = _get_fields(_BASE_URL + "/search")
if db not in _search_db_names:
# TODO - Make this a ValueError? Right now despite the HTML website
# claiming to, the "gene" or "ncbi-gene" don't work and are not listed.
import warnings
warnings.warn("TogoWS search does not officially support database '%s'. "
"See %s/search/ for options." % (db, _BASE_URL))
url = _BASE_URL + "/search/%s/%s/count" % (db, _quote(query))
handle = _open(url)
data = handle.read()
handle.close()
try:
count = int(data.strip())
except ValueError:
raise ValueError("Expected an integer from URL %s, got: %r" % (url, data))
return count
def search_iter(db, query, limit=None, batch=100):
"""TogoWS search iteratating over the results (generator function).
- db - database (string), see http://togows.dbcls.jp/search
- query - search term (string)
- limit - optional upper bound on number of search results
- batch - number of search results to pull back each time talk to
TogoWS (currently limited to 100).
You would use this function within a for loop, e.g.
>>> for id in search_iter("pubmed", "lung+cancer+drug", limit=10):
... print(id) # maybe fetch data with entry?
Internally this first calls the Bio.TogoWS.search_count() and then
uses Bio.TogoWS.search() to get the results in batches.
"""
count = search_count(db, query)
    if not count:
        return
# NOTE - We leave it to TogoWS to enforce any upper bound on each
# batch, they currently return an HTTP 400 Bad Request if above 100.
remain = count
if limit is not None:
remain = min(remain, limit)
offset = 1 # They don't use zero based counting
prev_ids = [] # Just cache the last batch for error checking
while remain:
batch = min(batch, remain)
# print("%r left, asking for %r" % (remain, batch))
ids = search(db, query, offset, batch).read().strip().split()
assert len(ids) == batch, "Got %i, expected %i" % (len(ids), batch)
# print("offset %i, %s ... %s" % (offset, ids[0], ids[-1]))
if ids == prev_ids:
raise RuntimeError("Same search results for previous offset")
for identifier in ids:
if identifier in prev_ids:
raise RuntimeError("Result %s was in previous batch"
% identifier)
yield identifier
offset += batch
remain -= batch
prev_ids = ids
def search(db, query, offset=None, limit=None, format=None):
"""TogoWS search (returns a handle).
This is a low level wrapper for the TogoWS search function, which
can return results in a several formats. In general, the search_iter
function is more suitable for end users.
- db - database (string), see http://togows.dbcls.jp/search/
- query - search term (string)
- offset, limit - optional integers specifying which result to start from
(1 based) and the number of results to return.
- format - return data file format (string), e.g. "json", "ttl" (RDF)
By default plain text is returned, one result per line.
At the time of writing, TogoWS applies a default count limit of 100
search results, and this is an upper bound. To access more results,
use the offset argument or the search_iter(...) function.
TogoWS supports a long list of databases, including many from the NCBI
(e.g. "ncbi-pubmed" or "pubmed", "ncbi-genbank" or "genbank", and
"ncbi-taxonomy"), EBI (e.g. "ebi-ebml" or "embl", "ebi-uniprot" or
"uniprot, "ebi-go"), and KEGG (e.g. "kegg-compound" or "compound").
For the current list, see http://togows.dbcls.jp/search/
The NCBI provide the Entrez Search service (ESearch) which is similar,
available in Biopython as the Bio.Entrez.esearch() function.
See also the function Bio.TogoWS.search_count() which returns the number
of matches found, and the Bio.TogoWS.search_iter() function which allows
you to iterate over the search results (taking care of batching for you).
"""
global _search_db_names
if _search_db_names is None:
_search_db_names = _get_fields(_BASE_URL + "/search")
if db not in _search_db_names:
# TODO - Make this a ValueError? Right now despite the HTML website
# claiming to, the "gene" or "ncbi-gene" don't work and are not listed.
import warnings
warnings.warn("TogoWS search does not explicitly support database '%s'. "
"See %s/search/ for options." % (db, _BASE_URL))
url = _BASE_URL + "/search/%s/%s" % (db, _quote(query))
if offset is not None and limit is not None:
        try:
            offset = int(offset)
        except (TypeError, ValueError):
            raise ValueError("Offset should be an integer (at least one), not %r" % offset)
        try:
            limit = int(limit)
        except (TypeError, ValueError):
            raise ValueError("Limit should be an integer (at least one), not %r" % limit)
if offset <= 0:
raise ValueError("Offset should be at least one, not %i" % offset)
if limit <= 0:
raise ValueError("Count should be at least one, not %i" % limit)
url += "/%i,%i" % (offset, limit)
elif offset is not None or limit is not None:
raise ValueError("Expect BOTH offset AND limit to be provided (or neither)")
if format:
url += "." + format
# print(url)
return _open(url)
def convert(data, in_format, out_format):
"""TogoWS convert (returns a handle).
data - string or handle containing input record(s)
in_format - string describing the input file format (e.g. "genbank")
out_format - string describing the requested output format (e.g. "fasta")
For a list of supported conversions (e.g. "genbank" to "fasta"), see
http://togows.dbcls.jp/convert/
Note that Biopython has built in support for conversion of sequence and
    alignment file formats (functions Bio.SeqIO.convert and Bio.AlignIO.convert)
"""
global _convert_formats
if not _convert_formats:
_convert_formats = _get_convert_formats()
if [in_format, out_format] not in _convert_formats:
msg = "\n".join("%s -> %s" % tuple(pair) for pair in _convert_formats)
raise ValueError("Unsupported conversion. Choose from:\n%s" % msg)
url = _BASE_URL + "/convert/%s.%s" % (in_format, out_format)
# TODO - Should we just accept a string not a handle? What about a filename?
if hasattr(data, "read"):
# Handle
return _open(url, post=data.read())
else:
# String
return _open(url, post=data)
def _open(url, post=None):
"""Helper function to build the URL and open a handle to it (PRIVATE).
Open a handle to TogoWS, will raise an IOError if it encounters an error.
    In the absence of clear guidelines, this function enforces a limit of
"up to three queries per second" to avoid abusing the TogoWS servers.
"""
delay = 0.333333333 # one third of a second
current = time.time()
wait = _open.previous + delay - current
if wait > 0:
time.sleep(wait)
_open.previous = current + wait
else:
_open.previous = current
# print(url)
if post:
handle = _urlopen(url, _as_bytes(post))
else:
handle = _urlopen(url)
# We now trust TogoWS to have set an HTTP error code, that
# suffices for my current unit tests. Previously we would
# examine the start of the data returned back.
return _binary_to_string_handle(handle)
_open.previous = 0
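# Illustrative usage sketch; the helper name, database names and identifiers are
# examples only, and the calls contact the live TogoWS service over HTTP, so the
# function is never executed on import.
def _togows_usage_example():
    handle = entry("pubmed", "16381885", field="title")
    title = handle.read().strip()
    handle.close()
    ids = list(search_iter("pubmed", "lung+cancer+drug", limit=5))
    return title, ids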
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/TogoWS/__init__.py
|
Python
|
gpl-2.0
| 13,691
|
[
"BLAST",
"Biopython"
] |
592a1b9b9c1bb913442c84b5857c087e4666ddef89a449c07cdd1170a0f0173f
|
"""
test views
"""
import datetime
import json
import re
import pytz
import ddt
import urlparse
from mock import patch, MagicMock
from nose.plugins.attrib import attr
from capa.tests.response_xml_factory import StringResponseXMLFactory
from courseware.courses import get_course_by_id
from courseware.tests.factories import StudentModuleFactory
from courseware.tests.helpers import LoginEnrollmentTestCase
from courseware.tabs import get_course_tab_list
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.core.urlresolvers import reverse, resolve
from django.utils.timezone import UTC
from django.test.utils import override_settings
from django.test import RequestFactory
from edxmako.shortcuts import render_to_response
from request_cache.middleware import RequestCache
from opaque_keys.edx.keys import CourseKey
from student.roles import CourseCcxCoachRole
from student.models import (
CourseEnrollment,
CourseEnrollmentAllowed,
)
from student.tests.factories import (
AdminFactory,
CourseEnrollmentFactory,
UserFactory,
)
from xmodule.x_module import XModuleMixin
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase,
SharedModuleStoreTestCase,
TEST_DATA_SPLIT_MODULESTORE)
from xmodule.modulestore.tests.factories import (
CourseFactory,
ItemFactory,
)
from ccx_keys.locator import CCXLocator
from lms.djangoapps.ccx.models import CustomCourseForEdX
from lms.djangoapps.ccx.overrides import get_override_for_ccx, override_field_for_ccx
from lms.djangoapps.ccx.tests.factories import CcxFactory
from lms.djangoapps.ccx.views import get_date
def flatten(seq):
    """Return a list with one level of nesting removed, e.g. [[1, 2], [3, 4]] -> [1, 2, 3, 4]."""
    return [x for sub in seq for x in sub]
def intercept_renderer(path, context):
"""
Intercept calls to `render_to_response` and attach the context dict to the
response for examination in unit tests.
"""
# I think Django already does this for you in their TestClient, except
# we're bypassing that by using edxmako. Probably edxmako should be
# integrated better with Django's rendering and event system.
response = render_to_response(path, context)
response.mako_context = context
response.mako_template = path
return response
def ccx_dummy_request():
"""
Returns dummy request object for CCX coach tab test
"""
factory = RequestFactory()
request = factory.get('ccx_coach_dashboard')
request.user = MagicMock()
return request
def setup_students_and_grades(context):
"""
Create students and set their grades.
:param context: class reference
"""
if context.course:
context.student = student = UserFactory.create()
CourseEnrollmentFactory.create(user=student, course_id=context.course.id)
context.student2 = student2 = UserFactory.create()
CourseEnrollmentFactory.create(user=student2, course_id=context.course.id)
# create grades for self.student as if they'd submitted the ccx
for chapter in context.course.get_children():
for i, section in enumerate(chapter.get_children()):
for j, problem in enumerate(section.get_children()):
# if not problem.visible_to_staff_only:
StudentModuleFactory.create(
grade=1 if i < j else 0,
max_grade=1,
student=context.student,
course_id=context.course.id,
module_state_key=problem.location
)
StudentModuleFactory.create(
grade=1 if i > j else 0,
max_grade=1,
student=context.student2,
course_id=context.course.id,
module_state_key=problem.location
)
def is_email(identifier):
"""
Checks if an `identifier` string is a valid email
"""
try:
validate_email(identifier)
except ValidationError:
return False
return True
@attr('shard_1')
@ddt.ddt
class TestCoachDashboard(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Tests for Custom Courses views.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
@classmethod
def setUpClass(cls):
super(TestCoachDashboard, cls).setUpClass()
cls.course = course = CourseFactory.create()
# Create a course outline
cls.mooc_start = start = datetime.datetime(
2010, 5, 12, 2, 42, tzinfo=pytz.UTC
)
cls.mooc_due = due = datetime.datetime(
2010, 7, 7, 0, 0, tzinfo=pytz.UTC
)
cls.chapters = [
ItemFactory.create(start=start, parent=course) for _ in xrange(2)
]
cls.sequentials = flatten([
[
ItemFactory.create(parent=chapter) for _ in xrange(2)
] for chapter in cls.chapters
])
cls.verticals = flatten([
[
ItemFactory.create(
start=start, due=due, parent=sequential, graded=True, format='Homework', category=u'vertical'
) for _ in xrange(2)
] for sequential in cls.sequentials
])
# Trying to wrap the whole thing in a bulk operation fails because it
# doesn't find the parents. But we can at least wrap this part...
with cls.store.bulk_operations(course.id, emit_signals=False):
blocks = flatten([ # pylint: disable=unused-variable
[
ItemFactory.create(parent=vertical) for _ in xrange(2)
] for vertical in cls.verticals
])
def setUp(self):
"""
Set up tests
"""
super(TestCoachDashboard, self).setUp()
# Create instructor account
self.coach = coach = AdminFactory.create()
self.client.login(username=coach.username, password="test")
# create an instance of modulestore
self.mstore = modulestore()
def make_coach(self):
"""
create coach user
"""
role = CourseCcxCoachRole(self.course.id)
role.add_users(self.coach)
def make_ccx(self, max_students_allowed=settings.CCX_MAX_STUDENTS_ALLOWED):
"""
create ccx
"""
ccx = CcxFactory(course_id=self.course.id, coach=self.coach)
override_field_for_ccx(ccx, self.course, 'max_student_enrollments_allowed', max_students_allowed)
return ccx
def get_outbox(self):
"""
get fake outbox
"""
from django.core import mail
return mail.outbox
def assert_elements_in_schedule(self, url, n_chapters=2, n_sequentials=4, n_verticals=8):
"""
Helper function to count visible elements in the schedule
"""
response = self.client.get(url)
# the schedule contains chapters
chapters = json.loads(response.mako_context['schedule']) # pylint: disable=no-member
sequentials = flatten([chapter.get('children', []) for chapter in chapters])
verticals = flatten([sequential.get('children', []) for sequential in sequentials])
# check that the numbers of nodes at different level are the expected ones
self.assertEqual(n_chapters, len(chapters))
self.assertEqual(n_sequentials, len(sequentials))
self.assertEqual(n_verticals, len(verticals))
# extract the locations of all the nodes
all_elements = chapters + sequentials + verticals
return [elem['location'] for elem in all_elements if 'location' in elem]
def hide_node(self, node):
"""
Helper function to set the node `visible_to_staff_only` property
to True and save the change
"""
node.visible_to_staff_only = True
self.mstore.update_item(node, self.coach.id)
def test_not_a_coach(self):
"""
User is not a coach, should get Forbidden response.
"""
ccx = self.make_ccx()
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': CCXLocator.from_course_locator(self.course.id, ccx.id)})
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
def test_no_ccx_created(self):
"""
No CCX is created, coach should see form to add a CCX.
"""
self.make_coach()
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTrue(re.search(
'<form action=".+create_ccx"',
response.content))
def test_create_ccx(self):
"""
Create CCX. Follow redirect to coach dashboard, confirm we see
the coach dashboard for the new CCX.
"""
self.make_coach()
url = reverse(
'create_ccx',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {'name': 'New CCX'})
self.assertEqual(response.status_code, 302)
url = response.get('location') # pylint: disable=no-member
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# Get the ccx_key
path = urlparse.urlparse(url).path
resolver = resolve(path)
ccx_key = resolver.kwargs['course_id']
course_key = CourseKey.from_string(ccx_key)
self.assertTrue(CourseEnrollment.is_enrolled(self.coach, course_key))
self.assertTrue(re.search('id="ccx-schedule"', response.content))
# check if the max amount of student that can be enrolled has been overridden
ccx = CustomCourseForEdX.objects.get()
course_enrollments = get_override_for_ccx(ccx, self.course, 'max_student_enrollments_allowed')
self.assertEqual(course_enrollments, settings.CCX_MAX_STUDENTS_ALLOWED)
# assert ccx creator has role=ccx_coach
role = CourseCcxCoachRole(course_key)
self.assertTrue(role.has_user(self.coach))
def test_get_date(self):
"""
Assert that get_date returns valid date.
"""
ccx = self.make_ccx()
for section in self.course.get_children():
self.assertEqual(get_date(ccx, section, 'start'), self.mooc_start)
self.assertEqual(get_date(ccx, section, 'due'), None)
for subsection in section.get_children():
self.assertEqual(get_date(ccx, subsection, 'start'), self.mooc_start)
self.assertEqual(get_date(ccx, subsection, 'due'), self.mooc_due)
for unit in subsection.get_children():
self.assertEqual(get_date(ccx, unit, 'start', parent_node=subsection), self.mooc_start)
self.assertEqual(get_date(ccx, unit, 'due', parent_node=subsection), self.mooc_due)
@SharedModuleStoreTestCase.modifies_courseware
@patch('ccx.views.render_to_response', intercept_renderer)
@patch('ccx.views.TODAY')
def test_get_ccx_schedule(self, today):
"""
Gets CCX schedule and checks number of blocks in it.
Hides nodes at a different depth and checks that these nodes
are not in the schedule.
"""
today.return_value = datetime.datetime(2014, 11, 25, tzinfo=pytz.UTC)
self.make_coach()
ccx = self.make_ccx()
url = reverse(
'ccx_coach_dashboard',
kwargs={
'course_id': CCXLocator.from_course_locator(
self.course.id, ccx.id)
}
)
# all the elements are visible
self.assert_elements_in_schedule(url)
# hide a vertical
vertical = self.verticals[0]
self.hide_node(vertical)
locations = self.assert_elements_in_schedule(url, n_verticals=7)
self.assertNotIn(unicode(vertical.location), locations)
# hide a sequential
sequential = self.sequentials[0]
self.hide_node(sequential)
locations = self.assert_elements_in_schedule(url, n_sequentials=3, n_verticals=6)
self.assertNotIn(unicode(sequential.location), locations)
# hide a chapter
chapter = self.chapters[0]
self.hide_node(chapter)
locations = self.assert_elements_in_schedule(url, n_chapters=1, n_sequentials=2, n_verticals=4)
self.assertNotIn(unicode(chapter.location), locations)
@patch('ccx.views.render_to_response', intercept_renderer)
@patch('ccx.views.TODAY')
def test_edit_schedule(self, today):
"""
Get CCX schedule, modify it, save it.
"""
today.return_value = datetime.datetime(2014, 11, 25, tzinfo=pytz.UTC)
self.make_coach()
ccx = self.make_ccx()
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': CCXLocator.from_course_locator(self.course.id, ccx.id)})
response = self.client.get(url)
schedule = json.loads(response.mako_context['schedule']) # pylint: disable=no-member
self.assertEqual(len(schedule), 2)
self.assertEqual(schedule[0]['hidden'], False)
# If a coach does not override dates, then dates will be imported from master course.
self.assertEqual(
schedule[0]['start'],
self.chapters[0].start.strftime('%Y-%m-%d %H:%M')
)
self.assertEqual(
schedule[0]['children'][0]['start'],
self.sequentials[0].start.strftime('%Y-%m-%d %H:%M')
)
if self.sequentials[0].due:
expected_due = self.sequentials[0].due.strftime('%Y-%m-%d %H:%M')
else:
expected_due = None
self.assertEqual(schedule[0]['children'][0]['due'], expected_due)
url = reverse(
'save_ccx',
kwargs={'course_id': CCXLocator.from_course_locator(self.course.id, ccx.id)})
def unhide(unit):
"""
Recursively unhide a unit and all of its children in the CCX
schedule.
"""
unit['hidden'] = False
for child in unit.get('children', ()):
unhide(child)
unhide(schedule[0])
schedule[0]['start'] = u'2014-11-20 00:00'
schedule[0]['children'][0]['due'] = u'2014-12-25 00:00' # what a jerk!
schedule[0]['children'][0]['children'][0]['start'] = u'2014-12-20 00:00'
schedule[0]['children'][0]['children'][0]['due'] = u'2014-12-25 00:00'
response = self.client.post(
url, json.dumps(schedule), content_type='application/json'
)
schedule = json.loads(response.content)['schedule']
self.assertEqual(schedule[0]['hidden'], False)
self.assertEqual(schedule[0]['start'], u'2014-11-20 00:00')
self.assertEqual(
schedule[0]['children'][0]['due'], u'2014-12-25 00:00'
)
self.assertEqual(
schedule[0]['children'][0]['children'][0]['due'], u'2014-12-25 00:00'
)
self.assertEqual(
schedule[0]['children'][0]['children'][0]['start'], u'2014-12-20 00:00'
)
# Make sure start date set on course, follows start date of earliest
# scheduled chapter
ccx = CustomCourseForEdX.objects.get()
course_start = get_override_for_ccx(ccx, self.course, 'start')
self.assertEqual(str(course_start)[:-9], self.chapters[0].start.strftime('%Y-%m-%d %H:%M'))
# Make sure grading policy adjusted
policy = get_override_for_ccx(ccx, self.course, 'grading_policy',
self.course.grading_policy)
self.assertEqual(policy['GRADER'][0]['type'], 'Homework')
self.assertEqual(policy['GRADER'][0]['min_count'], 8)
self.assertEqual(policy['GRADER'][1]['type'], 'Lab')
self.assertEqual(policy['GRADER'][1]['min_count'], 0)
self.assertEqual(policy['GRADER'][2]['type'], 'Midterm Exam')
self.assertEqual(policy['GRADER'][2]['min_count'], 0)
self.assertEqual(policy['GRADER'][3]['type'], 'Final Exam')
self.assertEqual(policy['GRADER'][3]['min_count'], 0)
@patch('ccx.views.render_to_response', intercept_renderer)
def test_save_without_min_count(self):
"""
POST grading policy without min_count field.
"""
self.make_coach()
ccx = self.make_ccx()
course_id = CCXLocator.from_course_locator(self.course.id, ccx.id)
save_policy_url = reverse(
'ccx_set_grading_policy', kwargs={'course_id': course_id})
# This policy doesn't include a min_count field
policy = {
"GRADE_CUTOFFS": {
"Pass": 0.5
},
"GRADER": [
{
"weight": 0.15,
"type": "Homework",
"drop_count": 2,
"short_label": "HW"
}
]
}
response = self.client.post(
save_policy_url, {"policy": json.dumps(policy)}
)
self.assertEqual(response.status_code, 302)
ccx = CustomCourseForEdX.objects.get()
# Make sure grading policy adjusted
policy = get_override_for_ccx(
ccx, self.course, 'grading_policy', self.course.grading_policy
)
self.assertEqual(len(policy['GRADER']), 1)
self.assertEqual(policy['GRADER'][0]['type'], 'Homework')
self.assertNotIn('min_count', policy['GRADER'][0])
save_ccx_url = reverse('save_ccx', kwargs={'course_id': course_id})
coach_dashboard_url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': course_id}
)
response = self.client.get(coach_dashboard_url)
schedule = json.loads(response.mako_context['schedule']) # pylint: disable=no-member
response = self.client.post(
save_ccx_url, json.dumps(schedule), content_type='application/json'
)
self.assertEqual(response.status_code, 200)
@ddt.data(
('ccx_invite', True, 1, 'student-ids', ('enrollment-button', 'Enroll')),
('ccx_invite', False, 0, 'student-ids', ('enrollment-button', 'Enroll')),
('ccx_manage_student', True, 1, 'student-id', ('student-action', 'add')),
('ccx_manage_student', False, 0, 'student-id', ('student-action', 'add')),
)
@ddt.unpack
def test_enroll_member_student(self, view_name, send_email, outbox_count, student_form_input_name, button_tuple):
"""
Tests the enrollment of a list of students who are members
of the class.
It tests 2 different views that use slightly different parameters,
but that perform the same task.
"""
self.make_coach()
ccx = self.make_ccx()
enrollment = CourseEnrollmentFactory(course_id=self.course.id)
student = enrollment.user
outbox = self.get_outbox()
self.assertEqual(outbox, [])
url = reverse(
view_name,
kwargs={'course_id': CCXLocator.from_course_locator(self.course.id, ccx.id)}
)
data = {
button_tuple[0]: button_tuple[1],
student_form_input_name: u','.join([student.email, ]), # pylint: disable=no-member
}
if send_email:
data['email-students'] = 'Notify-students-by-email'
response = self.client.post(url, data=data, follow=True)
self.assertEqual(response.status_code, 200)
# we were redirected to our current location
self.assertEqual(len(response.redirect_chain), 1)
self.assertIn(302, response.redirect_chain[0])
self.assertEqual(len(outbox), outbox_count)
if send_email:
self.assertIn(student.email, outbox[0].recipients()) # pylint: disable=no-member
# a CcxMembership exists for this student
self.assertTrue(
CourseEnrollment.objects.filter(course_id=self.course.id, user=student).exists()
)
def test_ccx_invite_enroll_up_to_limit(self):
"""
Enrolls a list of students up to the enrollment limit.
        This test is specific to one of the enrollment views: the view used in
        this test can perform bulk enrollments.
"""
self.make_coach()
# create ccx and limit the maximum amount of students that can be enrolled to 2
ccx = self.make_ccx(max_students_allowed=2)
ccx_course_key = CCXLocator.from_course_locator(self.course.id, ccx.id)
# create some users
students = [
UserFactory.create(is_staff=False) for _ in range(3)
]
url = reverse(
'ccx_invite',
kwargs={'course_id': ccx_course_key}
)
data = {
'enrollment-button': 'Enroll',
'student-ids': u','.join([student.email for student in students]),
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(response.status_code, 200)
# a CcxMembership exists for the first two students but not the third
self.assertTrue(
CourseEnrollment.objects.filter(course_id=ccx_course_key, user=students[0]).exists()
)
self.assertTrue(
CourseEnrollment.objects.filter(course_id=ccx_course_key, user=students[1]).exists()
)
self.assertFalse(
CourseEnrollment.objects.filter(course_id=ccx_course_key, user=students[2]).exists()
)
def test_manage_student_enrollment_limit(self):
"""
Enroll students up to the enrollment limit.
        This test is specific to one of the enrollment views: the view used in
        this test cannot perform bulk enrollments.
"""
students_limit = 1
self.make_coach()
ccx = self.make_ccx(max_students_allowed=students_limit)
ccx_course_key = CCXLocator.from_course_locator(self.course.id, ccx.id)
students = [
UserFactory.create(is_staff=False) for _ in range(2)
]
url = reverse(
'ccx_manage_student',
kwargs={'course_id': CCXLocator.from_course_locator(self.course.id, ccx.id)}
)
# enroll the first student
data = {
'student-action': 'add',
'student-id': u','.join([students[0].email, ]),
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(response.status_code, 200)
# a CcxMembership exists for this student
self.assertTrue(
CourseEnrollment.objects.filter(course_id=ccx_course_key, user=students[0]).exists()
)
        # try to enroll the second student; this should fail because the
        # enrollment limit has already been reached
data = {
'student-action': 'add',
'student-id': u','.join([students[1].email, ]),
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(response.status_code, 200)
# a CcxMembership does not exist for this student
self.assertFalse(
CourseEnrollment.objects.filter(course_id=ccx_course_key, user=students[1]).exists()
)
error_message = 'The course is full: the limit is {students_limit}'.format(
students_limit=students_limit
)
self.assertContains(response, error_message, status_code=200)
@ddt.data(
('ccx_invite', True, 1, 'student-ids', ('enrollment-button', 'Unenroll')),
('ccx_invite', False, 0, 'student-ids', ('enrollment-button', 'Unenroll')),
('ccx_manage_student', True, 1, 'student-id', ('student-action', 'revoke')),
('ccx_manage_student', False, 0, 'student-id', ('student-action', 'revoke')),
)
@ddt.unpack
def test_unenroll_member_student(self, view_name, send_email, outbox_count, student_form_input_name, button_tuple):
"""
Tests the unenrollment of a list of students who are members of the class.
It tests 2 different views that use slightly different parameters,
but that perform the same task.
"""
self.make_coach()
ccx = self.make_ccx()
course_key = CCXLocator.from_course_locator(self.course.id, ccx.id)
enrollment = CourseEnrollmentFactory(course_id=course_key)
student = enrollment.user
outbox = self.get_outbox()
self.assertEqual(outbox, [])
url = reverse(
view_name,
kwargs={'course_id': course_key}
)
data = {
button_tuple[0]: button_tuple[1],
student_form_input_name: u','.join([student.email, ]), # pylint: disable=no-member
}
if send_email:
data['email-students'] = 'Notify-students-by-email'
response = self.client.post(url, data=data, follow=True)
self.assertEqual(response.status_code, 200)
# we were redirected to our current location
self.assertEqual(len(response.redirect_chain), 1)
self.assertIn(302, response.redirect_chain[0])
self.assertEqual(len(outbox), outbox_count)
if send_email:
self.assertIn(student.email, outbox[0].recipients()) # pylint: disable=no-member
        # a CcxMembership does not exist for this student
self.assertFalse(
CourseEnrollment.objects.filter(course_id=self.course.id, user=student).exists()
)
@ddt.data(
('ccx_invite', True, 1, 'student-ids', ('enrollment-button', 'Enroll'), 'nobody@nowhere.com'),
('ccx_invite', False, 0, 'student-ids', ('enrollment-button', 'Enroll'), 'nobody@nowhere.com'),
('ccx_invite', True, 0, 'student-ids', ('enrollment-button', 'Enroll'), 'nobody'),
('ccx_invite', False, 0, 'student-ids', ('enrollment-button', 'Enroll'), 'nobody'),
('ccx_manage_student', True, 0, 'student-id', ('student-action', 'add'), 'dummy_student_id'),
('ccx_manage_student', False, 0, 'student-id', ('student-action', 'add'), 'dummy_student_id'),
('ccx_manage_student', True, 1, 'student-id', ('student-action', 'add'), 'xyz@gmail.com'),
('ccx_manage_student', False, 0, 'student-id', ('student-action', 'add'), 'xyz@gmail.com'),
)
@ddt.unpack
def test_enroll_non_user_student(
self, view_name, send_email, outbox_count, student_form_input_name, button_tuple, identifier):
"""
Tests the enrollment of a list of students who are not users yet.
It tests 2 different views that use slightly different parameters,
but that perform the same task.
"""
self.make_coach()
ccx = self.make_ccx()
course_key = CCXLocator.from_course_locator(self.course.id, ccx.id)
outbox = self.get_outbox()
self.assertEqual(outbox, [])
url = reverse(
view_name,
kwargs={'course_id': course_key}
)
data = {
button_tuple[0]: button_tuple[1],
student_form_input_name: u','.join([identifier, ]),
}
if send_email:
data['email-students'] = 'Notify-students-by-email'
response = self.client.post(url, data=data, follow=True)
self.assertEqual(response.status_code, 200)
# we were redirected to our current location
self.assertEqual(len(response.redirect_chain), 1)
self.assertIn(302, response.redirect_chain[0])
self.assertEqual(len(outbox), outbox_count)
# some error messages are returned for one of the views only
if view_name == 'ccx_manage_student' and not is_email(identifier):
error_message = 'Could not find a user with name or email "{identifier}" '.format(
identifier=identifier
)
self.assertContains(response, error_message, status_code=200)
if is_email(identifier):
if send_email:
self.assertIn(identifier, outbox[0].recipients())
self.assertTrue(
CourseEnrollmentAllowed.objects.filter(course_id=course_key, email=identifier).exists()
)
else:
self.assertFalse(
CourseEnrollmentAllowed.objects.filter(course_id=course_key, email=identifier).exists()
)
@ddt.data(
('ccx_invite', True, 0, 'student-ids', ('enrollment-button', 'Unenroll'), 'nobody@nowhere.com'),
('ccx_invite', False, 0, 'student-ids', ('enrollment-button', 'Unenroll'), 'nobody@nowhere.com'),
('ccx_invite', True, 0, 'student-ids', ('enrollment-button', 'Unenroll'), 'nobody'),
('ccx_invite', False, 0, 'student-ids', ('enrollment-button', 'Unenroll'), 'nobody'),
)
@ddt.unpack
def test_unenroll_non_user_student(
self, view_name, send_email, outbox_count, student_form_input_name, button_tuple, identifier):
"""
Unenroll a list of students who are not users yet
"""
self.make_coach()
course = CourseFactory.create()
ccx = self.make_ccx()
course_key = CCXLocator.from_course_locator(course.id, ccx.id)
outbox = self.get_outbox()
CourseEnrollmentAllowed(course_id=course_key, email=identifier)
self.assertEqual(outbox, [])
url = reverse(
view_name,
kwargs={'course_id': course_key}
)
data = {
button_tuple[0]: button_tuple[1],
student_form_input_name: u','.join([identifier, ]),
}
if send_email:
data['email-students'] = 'Notify-students-by-email'
response = self.client.post(url, data=data, follow=True)
self.assertEqual(response.status_code, 200)
# we were redirected to our current location
self.assertEqual(len(response.redirect_chain), 1)
self.assertIn(302, response.redirect_chain[0])
self.assertEqual(len(outbox), outbox_count)
self.assertFalse(
CourseEnrollmentAllowed.objects.filter(
course_id=course_key, email=identifier
).exists()
)
GET_CHILDREN = XModuleMixin.get_children
def patched_get_children(self, usage_key_filter=None):
"""Emulate system tools that mask courseware not visible to students"""
def iter_children():
"""skip children not visible to students"""
for child in GET_CHILDREN(self, usage_key_filter=usage_key_filter):
child._field_data_cache = {} # pylint: disable=protected-access
if not child.visible_to_staff_only:
yield child
return list(iter_children())
@attr('shard_1')
@override_settings(FIELD_OVERRIDE_PROVIDERS=(
'ccx.overrides.CustomCoursesForEdxOverrideProvider',))
@patch('xmodule.x_module.XModuleMixin.get_children', patched_get_children, spec=True)
class TestCCXGrades(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Tests for Custom Courses views.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
@classmethod
def setUpClass(cls):
super(TestCCXGrades, cls).setUpClass()
cls._course = course = CourseFactory.create(enable_ccx=True)
# Create a course outline
cls.mooc_start = start = datetime.datetime(
2010, 5, 12, 2, 42, tzinfo=pytz.UTC
)
chapter = ItemFactory.create(
start=start, parent=course, category='sequential'
)
cls.sections = sections = [
ItemFactory.create(
parent=chapter,
category="sequential",
metadata={'graded': True, 'format': 'Homework'})
for _ in xrange(4)
]
# making problems available at class level for possible future use in tests
cls.problems = [
[
ItemFactory.create(
parent=section,
category="problem",
data=StringResponseXMLFactory().build_xml(answer='foo'),
metadata={'rerandomize': 'always'}
) for _ in xrange(4)
] for section in sections
]
def setUp(self):
"""
Set up tests
"""
super(TestCCXGrades, self).setUp()
# Create instructor account
self.coach = coach = AdminFactory.create()
self.client.login(username=coach.username, password="test")
# Create CCX
role = CourseCcxCoachRole(self._course.id)
role.add_users(coach)
ccx = CcxFactory(course_id=self._course.id, coach=self.coach)
# override course grading policy and make last section invisible to students
override_field_for_ccx(ccx, self._course, 'grading_policy', {
'GRADER': [
{'drop_count': 0,
'min_count': 2,
'short_label': 'HW',
'type': 'Homework',
'weight': 1}
],
'GRADE_CUTOFFS': {'Pass': 0.75},
})
override_field_for_ccx(
ccx, self.sections[-1], 'visible_to_staff_only', True
)
# create a ccx locator and retrieve the course structure using that key
# which emulates how a student would get access.
self.ccx_key = CCXLocator.from_course_locator(self._course.id, ccx.id)
self.course = get_course_by_id(self.ccx_key, depth=None)
setup_students_and_grades(self)
self.client.login(username=coach.username, password="test")
self.addCleanup(RequestCache.clear_request_cache)
@patch('ccx.views.render_to_response', intercept_renderer)
@patch('instructor.views.gradebook_api.MAX_STUDENTS_PER_PAGE_GRADE_BOOK', 1)
def test_gradebook(self):
self.course.enable_ccx = True
RequestCache.clear_request_cache()
url = reverse(
'ccx_gradebook',
kwargs={'course_id': self.ccx_key}
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
        # Max number of students per page is one. Patched setting MAX_STUDENTS_PER_PAGE_GRADE_BOOK = 1
self.assertEqual(len(response.mako_context['students']), 1) # pylint: disable=no-member
student_info = response.mako_context['students'][0] # pylint: disable=no-member
self.assertEqual(student_info['grade_summary']['percent'], 0.5)
self.assertEqual(
student_info['grade_summary']['grade_breakdown'][0]['percent'],
0.5)
self.assertEqual(
len(student_info['grade_summary']['section_breakdown']), 4)
def test_grades_csv(self):
self.course.enable_ccx = True
RequestCache.clear_request_cache()
url = reverse(
'ccx_grades_csv',
kwargs={'course_id': self.ccx_key}
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# Are the grades downloaded as an attachment?
self.assertEqual(
response['content-disposition'],
'attachment'
)
rows = response.content.strip().split('\r')
headers = rows[0]
# picking first student records
data = dict(zip(headers.strip().split(','), rows[1].strip().split(',')))
self.assertNotIn('HW 04', data)
self.assertEqual(data['HW 01'], '0.75')
self.assertEqual(data['HW 02'], '0.5')
self.assertEqual(data['HW 03'], '0.25')
self.assertEqual(data['HW Avg'], '0.5')
@patch('courseware.views.render_to_response', intercept_renderer)
def test_student_progress(self):
self.course.enable_ccx = True
patch_context = patch('courseware.views.get_course_with_access')
get_course = patch_context.start()
get_course.return_value = self.course
self.addCleanup(patch_context.stop)
self.client.login(username=self.student.username, password="test")
url = reverse(
'progress',
kwargs={'course_id': self.ccx_key}
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
grades = response.mako_context['grade_summary'] # pylint: disable=no-member
self.assertEqual(grades['percent'], 0.5)
self.assertEqual(grades['grade_breakdown'][0]['percent'], 0.5)
self.assertEqual(len(grades['section_breakdown']), 4)
@ddt.ddt
class CCXCoachTabTestCase(SharedModuleStoreTestCase):
"""
Test case for CCX coach tab.
"""
@classmethod
def setUpClass(cls):
super(CCXCoachTabTestCase, cls).setUpClass()
cls.ccx_enabled_course = CourseFactory.create(enable_ccx=True)
cls.ccx_disabled_course = CourseFactory.create(enable_ccx=False)
def setUp(self):
super(CCXCoachTabTestCase, self).setUp()
self.user = UserFactory.create()
for course in [self.ccx_enabled_course, self.ccx_disabled_course]:
CourseEnrollmentFactory.create(user=self.user, course_id=course.id)
role = CourseCcxCoachRole(course.id)
role.add_users(self.user)
def check_ccx_tab(self, course):
"""Helper function for verifying the ccx tab."""
request = RequestFactory().request()
request.user = self.user
all_tabs = get_course_tab_list(request, course)
return any(tab.type == 'ccx_coach' for tab in all_tabs)
@ddt.data(
(True, True, True),
(True, False, False),
(False, True, False),
(False, False, False),
(True, None, False)
)
@ddt.unpack
def test_coach_tab_for_ccx_advance_settings(self, ccx_feature_flag, enable_ccx, expected_result):
"""
Test ccx coach tab state (visible or hidden) depending on the value of enable_ccx flag, ccx feature flag.
"""
with self.settings(FEATURES={'CUSTOM_COURSES_EDX': ccx_feature_flag}):
course = self.ccx_enabled_course if enable_ccx else self.ccx_disabled_course
self.assertEquals(
expected_result,
self.check_ccx_tab(course)
)
class TestStudentDashboardWithCCX(ModuleStoreTestCase):
"""
Test to ensure that the student dashboard works for users enrolled in CCX
courses.
"""
def setUp(self):
"""
Set up courses and enrollments.
"""
super(TestStudentDashboardWithCCX, self).setUp()
# Create a Draft Mongo and a Split Mongo course and enroll a student user in them.
self.student_password = "foobar"
self.student = UserFactory.create(username="test", password=self.student_password, is_staff=False)
self.draft_course = CourseFactory.create(default_store=ModuleStoreEnum.Type.mongo)
self.split_course = CourseFactory.create(default_store=ModuleStoreEnum.Type.split)
CourseEnrollment.enroll(self.student, self.draft_course.id)
CourseEnrollment.enroll(self.student, self.split_course.id)
# Create a CCX coach.
self.coach = AdminFactory.create()
role = CourseCcxCoachRole(self.split_course.id)
role.add_users(self.coach)
# Create a CCX course and enroll the user in it.
self.ccx = CcxFactory(course_id=self.split_course.id, coach=self.coach)
last_week = datetime.datetime.now(UTC()) - datetime.timedelta(days=7)
override_field_for_ccx(self.ccx, self.split_course, 'start', last_week) # Required by self.ccx.has_started().
course_key = CCXLocator.from_course_locator(self.split_course.id, self.ccx.id)
CourseEnrollment.enroll(self.student, course_key)
def test_load_student_dashboard(self):
self.client.login(username=self.student.username, password=self.student_password)
response = self.client.get(reverse('dashboard'))
self.assertEqual(response.status_code, 200)
self.assertTrue(re.search('Test CCX', response.content))
def flatten(seq):
"""
For [[1, 2], [3, 4]] returns [1, 2, 3, 4]. Does not recurse.
"""
return [x for sub in seq for x in sub]
def iter_blocks(course):
"""
Returns an iterator over all of the blocks in a course.
"""
def visit(block):
""" get child blocks """
yield block
for child in block.get_children():
for descendant in visit(child): # wish they'd backport yield from
yield descendant
return visit(course)
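# Illustrative usage of the helper above (``course`` is assumed to be any
# loaded course object; this trailing comment is an addition for clarity only):
#   for block in iter_blocks(course):
#       ...  # every block in the course tree is yielded exactly once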
|
ZLLab-Mooc/edx-platform
|
lms/djangoapps/ccx/tests/test_views.py
|
Python
|
agpl-3.0
| 40,635
|
[
"VisIt"
] |
46a01b32f373d6f190b2b39d8171d2152b0ca46b798a4f03853d6eac35279ee6
|
# -*- coding: utf-8 -*-
"""
@namespace Desenho
Pixmap manipulation
Copyright 2007, NATE-LSI-EPUSP
Oficina is developed in Brazil at Escola Politécnica of
Universidade de São Paulo. NATE is part of LSI (Integrable
Systems Laboratory) and stands for Learning, Work and Entertainment
Research Group. Visit our web page:
www.lsi.usp.br/nate
Suggestions, bugs and doubts, please email oficina@lsi.usp.br
Oficina is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation version 2 of
the License.
Oficina is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with Oficina; if not, write to the
Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
Boston, MA 02110-1301 USA.
The copy of the GNU General Public License is found in the
COPYING file included in the source distribution.
Authors:
Joyce Alessandra Saul (joycealess@gmail.com)
Andre Mossinato (andremossinato@gmail.com)
Nathalia Sautchuk Patrício (nathalia.sautchuk@gmail.com)
Pedro Kayatt (pekayatt@gmail.com)
Rafael Barbolo Lopes (barbolo@gmail.com)
Alexandre A. Gonçalves Martinazzo (alexandremartinazzo@gmail.com)
Colaborators:
Bruno Gola (brunogola@gmail.com)
Group Manager:
Irene Karaguilla Ficheman (irene@lsi.usp.br)
Cientific Coordinator:
Roseli de Deus Lopes (roseli@lsi.usp.br)
UI Design (OLPC):
Eben Eliason (eben@laptop.org)
Project Coordinator (OLPC):
Manusheel Gupta (manu@laptop.org)
Project Advisor (OLPC):
Walter Bender (walter@laptop.org)
"""
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import GObject
import logging
import math
import cairo
# The time to wait for the resize operation to be
# executed, after the resize controls are pressed.
RESIZE_DELAY = 500
class Desenho:
# Pixmap manipulation
def __init__(self, widget):
"""Initialize Desenho object.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
"""
self._resize_timer = None
self._rainbow_color_list = ['#ff0000', # red
'#ff8000', # orange
'#ffff00', # yellow
'#80ff00', # lime
'#00ff00', # green
'#00ff80', # green water
'#00ffff', # light blue
'#007fff', # almost blue
'#0000ff', # blue
'#8000ff', # indigo
'#ff00ff', # pink violet
'#ff0080'] # violet
self._rainbow_counter = 0
self.points = []
self.points1 = []
self.points2 = []
self.points3 = []
self.points4 = []
self._last_points_used = []
self._last_point_drawn_index = 0
def clear_control_points(self):
self._last_points_used = []
def line(self, widget, coords, temp):
"""Draw line.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
"""
if temp:
ctx = widget.temp_ctx
else:
ctx = widget.drawing_ctx
ctx.save()
ctx.new_path()
ctx.set_line_width(widget.tool['line size'])
ctx.set_line_cap(cairo.LINE_CAP_ROUND)
ctx.set_source_rgba(*widget.tool['cairo_stroke_color'])
ctx.move_to(widget.oldx, widget.oldy)
ctx.line_to(coords[0], coords[1])
ctx.stroke()
ctx.restore()
# TODO: clip
widget.queue_draw()
def eraser(self, widget, coords, last):
"""Erase part of the drawing.
@param self -- Desenho.Desenho instance
@param last -- last of oldx
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
@param size -- integer (default 30)
@param shape -- string (default 'circle')
"""
self._trace(widget, coords, last)
def brush(self, widget, coords, last):
"""Paint with brush.
@param self -- Desenho.Desenho instance
@param last -- last of oldx
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
@param size -- integer (default 30)
@param shape -- string (default 'circle')
"""
self._trace(widget, coords, last)
def kalidoscope(self, widget, coords, last):
"""Paint with kalidoscope.
@param self -- Desenho.Desenho instance
@param last -- last of oldx
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
"""
if not last:
self.points1 = []
self.points2 = []
self.points3 = []
self.points4 = []
shape = widget.tool['line shape']
rounded = (shape == 'circle')
x1, y1 = coords
x3, y2 = x1, y1
width, height = widget.get_size()
x2 = width - x1
x4 = x2
y3 = height - y1
y4 = y3
self.points1.append((x1, y1))
self.points2.append((x2, y2))
self.points3.append((x3, y3))
self.points4.append((x4, y4))
self._draw_polygon(widget, False, False, self.points1, False, rounded)
self._draw_polygon(widget, False, False, self.points2, False, rounded)
self._draw_polygon(widget, False, False, self.points3, False, rounded)
self._draw_polygon(widget, False, False, self.points4, False, rounded)
widget.queue_draw()
def stamp(self, widget, coords, last, stamp_size=20):
"""Paint with stamp.
@param self -- Desenho.Desenho instance
@param last -- last of oldx
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
@param stamp_size -- integer (default 20)
"""
widget.desenha = False
width = widget.resized_stamp.get_width()
height = widget.resized_stamp.get_height()
dx = coords[0] - width / 2
dy = coords[1] - height / 2
widget.drawing_ctx.save()
widget.drawing_ctx.translate(dx, dy)
widget.drawing_ctx.rectangle(dx, dy, width, height)
Gdk.cairo_set_source_pixbuf(widget.drawing_ctx, widget.resized_stamp,
0, 0)
widget.drawing_ctx.paint()
widget.drawing_ctx.restore()
widget.queue_draw_area(dx, dy, width, height)
def rainbow(self, widget, coords, last):
"""Paint with rainbow.
@param self -- Desenho.Desenho instance
@param last -- last of oldx
@param widget -- Area object (GtkDrawingArea)
        @param color -- select the color address
@param coords -- Two value tuple
@param size -- integer (default 30)
@param shape -- string (default 'circle')
"""
_color_str = self._rainbow_color_list[self._rainbow_counter]
_color = Gdk.color_parse(_color_str)
self._rainbow_counter += 1
if self._rainbow_counter > 11:
self._rainbow_counter = 0
widget.drawing_ctx.set_source_rgba(_color.red, _color.green,
_color.blue, 0.3)
self._old_trace(widget, coords, last)
def _old_trace(self, widget, coords, last):
"""
_old_trace is used only by rainbow
"""
widget.desenha = False
size = widget.tool['line size']
shape = widget.tool['line shape']
if shape == 'circle':
if last:
widget.drawing_ctx.set_line_width(size)
widget.drawing_ctx.set_line_cap(cairo.LINE_CAP_ROUND)
widget.drawing_ctx.set_line_join(cairo.LINE_JOIN_ROUND)
widget.drawing_ctx.move_to(last[0], last[1])
widget.drawing_ctx.line_to(coords[0], coords[1])
widget.drawing_ctx.stroke()
else:
widget.drawing_ctx.move_to(coords[0], coords[1])
widget.drawing_ctx.arc(coords[0], coords[1],
size / 2, 0., 2 * math.pi)
# when activity starts with rainbow tool, need this to
# not paint the background
widget.drawing_ctx.set_source_rgba(1.0, 1.0, 1.0, 0.0)
widget.drawing_ctx.fill()
elif shape == 'square':
if last:
points = [(last[0] - size / 2, last[1] - size / 2),
(coords[0] - size / 2, coords[1] - size / 2),
(coords[0] + size / 2, coords[1] + size / 2),
(last[0] + size / 2, last[1] + size / 2)]
for point in points:
widget.drawing_ctx.line_to(*point)
widget.drawing_ctx.fill()
points = [(last[0] + size / 2, last[1] - size / 2),
(coords[0] + size / 2, coords[1] - size / 2),
(coords[0] - size / 2, coords[1] + size / 2),
(last[0] - size / 2, last[1] + size / 2)]
for point in points:
widget.drawing_ctx.line_to(*point)
widget.drawing_ctx.fill()
else:
widget.drawing_ctx.move_to(coords[0] - size / 2,
coords[1] - size / 2)
widget.drawing_ctx.rectangle(coords[0] - size / 2,
coords[1] - size / 2, size, size)
# when activity starts with rainbow tool, need this to
# not paint the background
widget.drawing_ctx.set_source_rgba(1.0, 1.0, 1.0, 0.0)
widget.drawing_ctx.fill()
if last:
x = min(coords[0], last[0])
width = max(coords[0], last[0]) - x
y = min(coords[1], last[1])
height = max(coords[1], last[1]) - y
# We add size to avoid drawing dotted lines
widget.queue_draw_area(x - size, y - size,
width + size * 2, height + size * 2)
else:
widget.queue_draw()
def finish_trace(self, widget):
widget.desenha = False
shape = widget.tool['line shape']
rounded = (shape == 'circle')
self._draw_polygon(widget, False, False, self.points, False, rounded)
if not rounded and len(self.points) == 1:
# draw a square if the mouse was not moved
size = widget.tool['line size']
coords = self.points[0]
widget.drawing_ctx.save()
if widget.tool['name'] == 'eraser':
color = (1.0, 1.0, 1.0, 1.0)
else:
color = widget.tool['cairo_stroke_color']
widget.drawing_ctx.set_source_rgba(*color)
widget.drawing_ctx.move_to(coords[0] - size / 2,
coords[1] - size / 2)
widget.drawing_ctx.rectangle(coords[0] - size / 2,
coords[1] - size / 2, size, size)
widget.drawing_ctx.fill()
widget.drawing_ctx.restore()
self.points = []
self._last_point_drawn_index = 0
def _trace(self, widget, coords, last):
widget.desenha = True
size = widget.tool['line size']
shape = widget.tool['line shape']
rounded = (shape == 'circle')
self.points.append((coords[0], coords[1]))
if last:
self._draw_polygon(widget, True, False, self.points, False,
rounded)
self.clear_control_points()
if last:
x = min(coords[0], last[0])
width = max(coords[0], last[0]) - x
y = min(coords[1], last[1])
height = max(coords[1], last[1]) - y
# We add size to avoid drawing dotted lines
widget.queue_draw_area(x - size, y - size,
width + size * 2, height + size * 2)
def square(self, widget, coords, temp, fill):
"""Draw a square.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
@param temp -- switch between drawing context and temp context
@param fill -- Fill object
"""
x, y, dx, dy, = self.adjust(widget, coords)
points = [(x, y), (x + dx, y), (x + dx, y + dy), (x, y + dy)]
self._draw_polygon(widget, temp, fill, points)
def _draw_polygon(self, widget, temp, fill, points, closed=True,
rounded=False):
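        """Draw (and optionally fill) the polygon defined by ``points``.

        Shared helper used by the shape and trace tools: ``temp`` selects the
        temporary context instead of the drawing context, ``closed`` closes the
        path and ``rounded`` uses round caps/joins. (Docstring added for
        clarity; parameter meanings are inferred from the callers in this
        file.)
        """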
if not points:
return
if temp:
ctx = widget.temp_ctx
else:
ctx = widget.drawing_ctx
ctx.save()
ctx.new_path()
ctx.move_to(*points[0])
for point in points:
ctx.line_to(*point)
if closed:
ctx.close_path()
if rounded:
ctx.set_line_cap(cairo.LINE_CAP_ROUND)
ctx.set_line_join(cairo.LINE_JOIN_ROUND)
else:
ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
ctx.set_line_join(cairo.LINE_JOIN_MITER)
ctx.set_line_width(widget.tool['line size'])
if fill:
ctx.save()
ctx.set_source_rgba(*widget.tool['cairo_fill_color'])
ctx.fill_preserve()
ctx.set_operator(cairo.OPERATOR_SOURCE)
ctx.set_source_rgba(1.0, 1.0, 1.0, 1)
ctx.stroke_preserve()
ctx.restore()
if widget.tool['name'] == 'eraser':
ctx.set_source_rgba(1.0, 1.0, 1.0, 1.0)
else:
ctx.set_source_rgba(*widget.tool['cairo_stroke_color'])
ctx.stroke()
ctx.restore()
if fill or closed:
self._last_points_used.extend(points)
area = widget.calculate_damaged_area(self._last_points_used)
widget.queue_draw_area(*area)
else:
            # if it is an open line and is not filled (like when using the
            # pencil) we don't need to draw the whole polygon; we can draw only
            # the part from the last queue update until now
self._last_points_used = points[self._last_point_drawn_index:]
if self._last_points_used:
area = widget.calculate_damaged_area(self._last_points_used)
self._last_point_drawn_index = len(points)
widget.queue_draw_area(*area)
def triangle(self, widget, coords, temp, fill):
"""Draw a triangle.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
@param temp -- switch between drawing context and temp context
@param fill -- Fill object
"""
points = [(widget.oldx, widget.oldy),
(widget.oldx + int((coords[0] - widget.oldx) / 2),
coords[1]),
(coords[0], widget.oldy)]
self._draw_polygon(widget, temp, fill, points)
def trapezoid(self, widget, coords, temp, fill):
"""Draw a trapezoid.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
@param temp -- switch between context and temp context
@param fill -- Fill object
"""
dif = int((coords[0] - widget.oldx) / 4)
points = [(widget.oldx, widget.oldy), (widget.oldx + dif, coords[1]),
(coords[0] - dif, coords[1]), (coords[0], widget.oldy)]
self._draw_polygon(widget, temp, fill, points)
def arrow(self, widget, coords, temp, fill):
"""Draw a arrow.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
@param temp -- switch between context and temp context
@param fill -- Fill object
"""
x = coords[0] - widget.oldx
y = coords[1] - widget.oldy
A = math.atan2(y, x)
dA = 2 * math.pi / 2
r = math.hypot(y, x)
p = [(widget.oldx, widget.oldy)]
p.append((widget.oldx + int(r * math.cos(A)),
widget.oldy + int(r * math.sin(A))))
p.append((widget.oldx + int(0.74 * r * math.cos(A + dA / 6)),
widget.oldy + int(0.74 * r * math.sin(A + dA / 6))))
p.append((widget.oldx + int(2 * r * math.cos(A + dA / 6 + dA / 20)),
widget.oldy + int(2 * r * math.sin(A + dA / 6 + dA / 20))))
p.append((widget.oldx +
int(2 * r * math.cos(A + dA / 6 - dA / 20 + dA / 6)),
widget.oldy +
int(2 * r * math.sin(A + dA / 6 - dA / 20 + dA / 6))))
p.append((widget.oldx + int(0.74 * r * math.cos(A + dA / 6 + dA / 6)),
widget.oldy + int(0.74 * r * math.sin(A + dA / 6 + dA / 6))))
p.append((widget.oldx + int(r * math.cos(A + dA / 2)),
widget.oldy + int(r * math.sin(A + dA / 2))))
self._draw_polygon(widget, temp, fill, p)
def parallelogram(self, widget, coords, temp, fill):
"""Draw a parallelogram.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
@param temp -- switch between context and temp context
@param fill -- Fill object
"""
x = int((coords[0] - widget.oldx) / 4)
points = [(widget.oldx, widget.oldy), (coords[0] - x, widget.oldy),
(coords[0], coords[1]), (widget.oldx + x, coords[1])]
self._draw_polygon(widget, temp, fill, points)
def star(self, widget, coords, n, temp, fill):
"""Draw polygon with n sides.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
@param n -- number of sides
@param temp -- switch between context and temp context
@param fill -- Fill object
"""
x = coords[0] - widget.oldx
y = coords[1] - widget.oldy
A = math.atan2(y, x)
dA = 2 * math.pi / n
r = math.hypot(y, x)
p = [(widget.oldx + int(r * math.cos(A)),
widget.oldy + int(r * math.sin(A))),
(widget.oldx + int(0.4 * r * math.cos(A + dA / 2)),
widget.oldy + int(0.4 * r * math.sin(A + dA / 2)))]
for _i in range(int(n) - 1):
A = A + dA
p.append((widget.oldx + int(r * math.cos(A)),
widget.oldy + int(r * math.sin(A))))
p.append((widget.oldx + int(0.4 * r * math.cos(A + dA / 2)),
widget.oldy + int(0.4 * r * math.sin(A + dA / 2))))
self._draw_polygon(widget, temp, fill, p)
def polygon_regular(self, widget, coords, n, temp, fill):
"""Draw polygon with n sides.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
@param n -- number of sides
@param temp -- switch between context and temp context
@param fill -- Fill object
"""
x = coords[0] - widget.oldx
y = coords[1] - widget.oldy
A = math.atan2(y, x)
dA = 2 * math.pi / n
r = math.hypot(y, x)
p = [(widget.oldx + int(r * math.cos(A)),
widget.oldy + int(r * math.sin(A)))]
for _i in range(int(n) - 1):
A = A + dA
p.append((widget.oldx + int(r * math.cos(A)),
widget.oldy + int(r * math.sin(A))))
self._draw_polygon(widget, temp, fill, p)
def heart(self, widget, coords, temp, fill):
"""Draw polygon with n sides.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
@param temp -- switch between context and temp context
@param fill -- Fill object
"""
if temp:
ctx = widget.temp_ctx
else:
ctx = widget.drawing_ctx
dy = math.fabs(coords[1] - widget.oldy)
r = math.hypot(dy, dy)
w = r / 10.0
if w == 0:
# non invertible cairo matrix
return
ctx.set_line_width(widget.tool['line size'])
line_width = ctx.get_line_width()
ctx.save()
ctx.new_path()
ctx.translate(widget.oldx, widget.oldy)
ctx.scale(w, w)
ctx.move_to(0, 0)
ctx.curve_to(0, -30, -50, -30, -50, 0)
ctx.curve_to(-50, 30, 0, 35, 0, 60)
ctx.curve_to(0, 35, 50, 30, 50, 0)
ctx.curve_to(50, -30, 0, -30, 0, 0)
ctx.set_line_width(line_width / w)
if fill:
ctx.save()
ctx.set_source_rgba(*widget.tool['cairo_fill_color'])
ctx.fill_preserve()
ctx.set_operator(cairo.OPERATOR_SOURCE)
ctx.set_source_rgba(1.0, 1.0, 1.0, 1)
ctx.stroke_preserve()
ctx.restore()
ctx.set_source_rgba(*widget.tool['cairo_stroke_color'])
ctx.stroke()
ctx.restore()
# TODO: clip
widget.queue_draw()
def circle(self, widget, coords, temp, fill):
"""Draw a circle.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
@param temp -- switch between context and temp context
@param fill -- Fill object
"""
if temp:
ctx = widget.temp_ctx
else:
ctx = widget.drawing_ctx
x, y, dx, dy = self.adjust(widget, coords)
if dx == 0 or dy == 0:
# scale by 0 gives error
return
ctx.set_line_width(widget.tool['line size'])
line_width = ctx.get_line_width()
ctx.save()
ctx.new_path()
ctx.translate(x, y)
ctx.scale(dx, dy)
ctx.arc(0., 0., 1., 0., 2 * math.pi)
ctx.set_line_width(line_width / float(min(dx, dy)))
if fill:
ctx.save()
ctx.set_source_rgba(*widget.tool['cairo_fill_color'])
ctx.fill_preserve()
ctx.set_operator(cairo.OPERATOR_SOURCE)
ctx.set_source_rgba(1.0, 1.0, 1.0, 1)
ctx.stroke_preserve()
ctx.restore()
ctx.set_source_rgba(*widget.tool['cairo_stroke_color'])
ctx.stroke()
ctx.restore()
# TODO: clip
widget.queue_draw()
def clear(self, widget):
"""Clear the drawing.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
"""
logging.debug('Desenho.clear')
widget.desenha = False
widget.textos = []
x, y = 0, 0
width, height = widget.get_size()
# try to clear a selected area first
if widget.is_selected():
selection_surface = widget.get_selection()
_x, _y, width, height = widget.get_selection_bounds()
ctx = cairo.Context(selection_surface)
ctx.rectangle(0, 0, width, height)
ctx.set_source_rgb(1.0, 1.0, 1.0)
ctx.fill()
else:
widget.drawing_ctx.rectangle(x, y, width, height)
widget.drawing_ctx.set_source_rgb(1.0, 1.0, 1.0)
widget.drawing_ctx.fill()
widget.queue_draw()
def text(self, widget, coord_x, coord_y):
"""Display and draw text in the drawing area.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
@param coord_x
@param coord_y
"""
if not widget.text_in_progress:
widget.text_in_progress = True
widget.activity.move_textview(coord_x, coord_y)
widget.activity.textview.show()
widget.activity.textview.set_cursor_visible(True)
widget.activity.textview.grab_focus()
else:
widget.text_in_progress = False
textview = widget.activity.textview
textview.set_cursor_visible(False)
            # need to wait until the cursor is hidden
GObject.idle_add(self._finalize_text, widget, textview)
def _finalize_text(self, widget, textview):
buf = textview.get_buffer()
window = textview.get_window(Gtk.TextWindowType.TEXT)
ctx = widget.drawing_ctx
tv_alloc = textview.get_allocation()
Gdk.cairo_set_source_window(ctx, window, tv_alloc.x, tv_alloc.y)
ctx.paint()
widget.activity.textview.hide()
widget.drawing_canvas.flush()
try:
widget.activity.textview.set_text('')
except AttributeError:
buf.set_text('')
widget.enable_undo()
widget.queue_draw()
def selection(self, widget, coords):
"""Make a selection.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
"""
x, y, dx, dy = self.adjust(widget, coords, True)
widget.set_selection_bounds(x, y, dx, dy)
# TODO: clip
widget.queue_draw()
def move_selection(self, widget, coords):
"""Move the selection.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
@param mvcopy -- Copy or Move
@param pixbuf_copy -- For import image
"""
widget.desenha = True
dx = int(coords[0] - widget.oldx)
dy = int(coords[1] - widget.oldy)
x, y, width, height = widget.get_selection_bounds()
if widget.pending_clean_selection_background:
# clear the selection background
widget.clear_selection_background()
widget.pending_clean_selection_background = False
widget.oldx = coords[0]
widget.oldy = coords[1]
new_x, new_y = x + dx, y + dy
widget.set_selection_start(new_x, new_y)
widget.queue_draw()
def resize_selection(self, widget, coords):
"""Move the selection.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
@param mvcopy -- Copy or Move
@param pixbuf_copy -- For import image
"""
dx = int(coords[0] - widget.oldx)
dy = int(coords[1] - widget.oldy)
sel_width = widget.selection_surface.get_width()
sel_height = widget.selection_surface.get_height()
if widget.pending_clean_selection_background:
# clear the selection background
widget.clear_selection_background()
widget.pending_clean_selection_background = False
width_scale = float(sel_width + dx) / float(sel_width)
height_scale = float(sel_height + dy) / float(sel_height)
if width_scale < 0 or height_scale < 0:
return
widget.resize_selection_surface(width_scale, height_scale)
def freeform(self, widget, coords, temp, fill, param=None):
"""Draw polygon.
@param self -- Desenho.Desenho instance
@param widget -- Area object (GtkDrawingArea)
@param coords -- Two value tuple
@param temp -- switch between drawing context and temp context
@param fill -- Fill object
"""
if param == "moving":
# mouse not pressed moving
if self.points:
if widget.last:
self.points.append((coords[0], coords[1]))
widget.last = []
else:
self.points[-1] = (coords[0], coords[1])
elif param == "motion":
# when mousepress or mousemove
if widget.last:
self.points.append((widget.last[0], widget.last[1]))
self.points.append((coords[0], coords[1]))
else:
self.points.append((widget.oldx, widget.oldy))
self.points.append((coords[0], coords[1]))
widget.last = coords
elif param == "release":
if len(self.points) > 2:
first = self.points[0]
dx = coords[0] - first[0]
dy = coords[1] - first[1]
d = math.hypot(dx, dy)
if d > 20:
widget.last = coords
self.points.append(coords)
else:
# close the polygon
self.points.append((first[0], first[1]))
# set the last point index to zero to force draw all
# the polygon
self._last_point_drawn_index = 0
self._draw_polygon(widget, False, fill, self.points)
widget.desenha = False
widget.last = []
self.points = []
widget.enable_undo()
return
widget.desenha = True
if self.points:
            # Draw a circle to show where the freeform starts/finishes
ctx = widget.temp_ctx
ctx.save()
x_init, y_init = self.points[0]
ctx.new_path()
ctx.translate(x_init, y_init)
ctx.set_line_width(1)
ctx.set_source_rgba(1., 1., 1., 1.)
ctx.set_line_cap(cairo.LINE_CAP_ROUND)
ctx.set_line_join(cairo.LINE_JOIN_ROUND)
ctx.arc(0, 0, 20, 0., 2 * math.pi)
ctx.stroke_preserve()
ctx.set_dash([5, 5], 0)
ctx.set_source_rgba(0., 0., 0., 1.)
ctx.stroke()
ctx.restore()
            # Display the open polygon in the temp canvas
self._draw_polygon(widget, True, False, self.points, closed=False)
self.clear_control_points()
def adjust(self, widget, coords, locked=False):
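        """Return a normalised bounding box (x, y, dx, dy) spanning the press
        point (widget.oldx, widget.oldy) and ``coords``; when ``locked`` is
        True the box is clamped to the widget size. (Docstring added for
        clarity; behaviour inferred from the code below.)
        """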
width, height = widget.get_size()
if widget.oldx > int(coords[0]):
xi = int(coords[0])
xf = widget.oldx
else:
xi = widget.oldx
xf = int(coords[0])
if widget.oldy > int(coords[1]):
yi = int(coords[1])
yf = widget.oldy
else:
yi = widget.oldy
yf = int(coords[1])
if locked:
if xi < 0:
xi = 0
if yi < 0:
yi = 0
if xf > width:
xf = width
if yf > height:
yf = height
dx = xf - xi
dy = yf - yi
return xi, yi, dx, dy
|
samdroid-apps/paint-activity
|
Desenho.py
|
Python
|
gpl-2.0
| 32,007
|
[
"VisIt"
] |
7f04d5b6bfd9ff4b9d170d7b66358233074a62f252d6bbf1538875d94c90ee23
|
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""ViT-SNGP-B/16 finetuning on CIFAR.
"""
# pylint: enable=line-too-long
import ml_collections
def get_config():
"""Config for training a patch-transformer on JFT."""
config = ml_collections.ConfigDict()
# Fine-tuning dataset
config.dataset = 'cifar10'
config.val_split = 'train[98%:]'
config.train_split = 'train[:98%]'
config.num_classes = 10
BATCH_SIZE = 512 # pylint: disable=invalid-name
config.batch_size = BATCH_SIZE
config.total_steps = 10_000
INPUT_RES = 384 # pylint: disable=invalid-name
pp_common = '|value_range(-1, 1)'
# pp_common += f'|onehot({config.num_classes})'
# To use ancestor 'smearing', use this line instead:
pp_common += f'|onehot({config.num_classes}, key="label", key_result="labels")' # pylint: disable=line-too-long
pp_common += '|keep(["image", "labels"])'
config.pp_train = f'decode|inception_crop({INPUT_RES})|flip_lr' + pp_common
config.pp_eval = f'decode|resize({INPUT_RES})' + pp_common
# OOD evaluation dataset
config.ood_datasets = ['cifar100', 'svhn_cropped']
config.ood_num_classes = [100, 10]
config.ood_split = 'test'
config.ood_methods = ['msp', 'entropy', 'maha', 'rmaha']
pp_eval_ood = []
for num_classes in config.ood_num_classes:
if num_classes > config.num_classes:
# Note that evaluation_fn ignores the entries with all zero labels for
# evaluation. When num_classes > n_cls, we should use onehot{num_classes},
# otherwise the labels that are greater than n_cls will be encoded with
# all zeros and then be ignored.
pp_eval_ood.append(
config.pp_eval.replace(f'onehot({config.num_classes}',
f'onehot({num_classes}'))
else:
pp_eval_ood.append(config.pp_eval)
config.pp_eval_ood = pp_eval_ood
# CIFAR-10H eval
config.eval_on_cifar_10h = True
config.pp_eval_cifar_10h = f'decode|resize({INPUT_RES})|value_range(-1, 1)|keep(["image", "labels"])'
config.shuffle_buffer_size = 50_000 # Per host, so small-ish is ok.
config.log_training_steps = 10
config.log_eval_steps = 100
# NOTE: eval is very fast O(seconds) so it's fine to run it often.
config.checkpoint_steps = 1000
config.checkpoint_timeout = 1
config.prefetch_to_device = 2
config.trial = 0
# Model section
# pre-trained model ckpt file
# !!! The below section should be modified per experiment
config.model_init = '/path/to/pretrained_model_ckpt.npz'
# Model definition to be copied from the pre-training config
config.model = ml_collections.ConfigDict()
config.model.patches = ml_collections.ConfigDict()
config.model.patches.size = [16, 16]
config.model.hidden_size = 768
config.model.transformer = ml_collections.ConfigDict()
config.model.transformer.attention_dropout_rate = 0.
config.model.transformer.dropout_rate = 0.
config.model.transformer.mlp_dim = 3072
config.model.transformer.num_heads = 12
config.model.transformer.num_layers = 12
config.model.classifier = 'token' # Or 'gap'
# Re-initialize the trainable parameters in GP output layer (Also those in the
# dense output layer if loading from deterministic checkpoint).
config.model_reinit_params = ('head/output_layer/kernel',
'head/output_layer/bias', 'head/kernel',
'head/bias')
# This is "no head" fine-tuning, which we use by default
config.model.representation_size = None
# Gaussian process layer section
config.gp_layer = ml_collections.ConfigDict()
config.gp_layer.ridge_penalty = 1.
# Disable momentum in order to use exact covariance update for finetuning.
config.gp_layer.covmat_momentum = -1.
config.gp_layer.mean_field_factor = 20.
# Optimizer section
config.optim_name = 'Momentum'
config.optim = ml_collections.ConfigDict()
config.grad_clip_norm = 1.
config.weight_decay = None # No explicit weight decay
config.loss = 'softmax_xent' # or 'sigmoid_xent'
config.lr = ml_collections.ConfigDict()
config.lr.base = 0.0005
config.lr.warmup_steps = 500
config.lr.decay_type = 'cosine'
return config
def get_sweep(hyper):
# Below shows an example for how to sweep hyperparameters.
# lr_grid = [1e-4, 5e-4, 1e-3, 2e-3,]
# clip_grid = [-1., 0.5, 1., 2.5, 5., 10.]
# mf_grid = [-1., 0.1, 0.5, 1., 2.5, 5., 7.5, 10., 12.5, 15., 20.]
return hyper.product([
# hyper.sweep('config.lr.base', lr_grid),
# hyper.sweep('config.grad_clip_norm', clip_grid),
# hyper.sweep('config.gp_layer.mean_field_factor', mf_grid),
])
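# Illustrative use of this config (assumed entry point, added for clarity only;
# the training scripts in this repository consume the ConfigDict directly):
#   config = get_config()
#   assert config.dataset == 'cifar10' and config.batch_size == 512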
|
google/uncertainty-baselines
|
baselines/jft/experiments/imagenet21k_vit_base16_sngp_finetune_cifar.py
|
Python
|
apache-2.0
| 5,192
|
[
"Gaussian"
] |
b5bccf94c6e08e198a5440438c39a1779811823e73dae4f7c60b60f015b70ae9
|
#!/usr/bin/env python
#
# This example reads a volume dataset and displays it via volume rendering.
#
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the renderer, the render window, and the interactor. The renderer
# draws into the render window, the interactor enables mouse- and
# keyboard-based interaction with the scene.
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# The following reader is used to read a series of 2D slices (images)
# that compose the volume. The slice dimensions are set, and the
# pixel spacing. The data Endianness must also be specified. The reader
# uses the FilePrefix in combination with the slice number to construct
# filenames using the format FilePrefix.%d. (In this case the FilePrefix
# is the root name of the file: quarter.)
v16 = vtk.vtkVolume16Reader()
v16.SetDataDimensions(64, 64)
v16.SetImageRange(1, 93)
v16.SetDataByteOrderToLittleEndian()
v16.SetFilePrefix(VTK_DATA_ROOT + "/Data/headsq/quarter")
v16.SetDataSpacing(3.2, 3.2, 1.5)
# The volume will be displayed by ray-cast alpha compositing.
# A ray-cast mapper is needed to do the ray-casting, and a
# compositing function is needed to do the compositing along the ray.
rayCastFunction = vtk.vtkVolumeRayCastCompositeFunction()
volumeMapper = vtk.vtkVolumeRayCastMapper()
volumeMapper.SetInputConnection(v16.GetOutputPort())
volumeMapper.SetVolumeRayCastFunction(rayCastFunction)
# The color transfer function maps voxel intensities to colors.
# It is modality-specific, and often anatomy-specific as well.
# The goal is to use one color for flesh (between 500 and 1000)
# and another color for bone (1150 and over).
volumeColor = vtk.vtkColorTransferFunction()
volumeColor.AddRGBPoint(0, 0.0, 0.0, 0.0)
volumeColor.AddRGBPoint(500, 1.0, 0.5, 0.3)
volumeColor.AddRGBPoint(1000, 1.0, 0.5, 0.3)
volumeColor.AddRGBPoint(1150, 1.0, 1.0, 0.9)
# The opacity transfer function is used to control the opacity
# of different tissue types.
volumeScalarOpacity = vtk.vtkPiecewiseFunction()
volumeScalarOpacity.AddPoint(0, 0.00)
volumeScalarOpacity.AddPoint(500, 0.15)
volumeScalarOpacity.AddPoint(1000, 0.15)
volumeScalarOpacity.AddPoint(1150, 0.85)
# The gradient opacity function is used to decrease the opacity
# in the "flat" regions of the volume while maintaining the opacity
# at the boundaries between tissue types. The gradient is measured
# as the amount by which the intensity changes over unit distance.
# For most medical data, the unit distance is 1mm.
volumeGradientOpacity = vtk.vtkPiecewiseFunction()
volumeGradientOpacity.AddPoint(0, 0.0)
volumeGradientOpacity.AddPoint(90, 0.5)
volumeGradientOpacity.AddPoint(100, 1.0)
# The VolumeProperty attaches the color and opacity functions to the
# volume, and sets other volume properties. The interpolation should
# be set to linear to do a high-quality rendering. The ShadeOn option
# turns on directional lighting, which will usually enhance the
# appearance of the volume and make it look more "3D". However,
# the quality of the shading depends on how accurately the gradient
# of the volume can be calculated, and for noisy data the gradient
# estimation will be very poor. The impact of the shading can be
# decreased by increasing the Ambient coefficient while decreasing
# the Diffuse and Specular coefficient. To increase the impact
# of shading, decrease the Ambient and increase the Diffuse and Specular.
volumeProperty = vtk.vtkVolumeProperty()
volumeProperty.SetColor(volumeColor)
volumeProperty.SetScalarOpacity(volumeScalarOpacity)
volumeProperty.SetGradientOpacity(volumeGradientOpacity)
volumeProperty.SetInterpolationTypeToLinear()
volumeProperty.ShadeOn()
volumeProperty.SetAmbient(0.4)
volumeProperty.SetDiffuse(0.6)
volumeProperty.SetSpecular(0.2)
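# (Illustration only, not part of the original example.) For noisier data the
# impact of shading could be reduced as described above, e.g.:
#   volumeProperty.SetAmbient(0.6)
#   volumeProperty.SetDiffuse(0.4)
#   volumeProperty.SetSpecular(0.1)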
# The vtkVolume is a vtkProp3D (like a vtkActor) and controls the position
# and orientation of the volume in world coordinates.
volume = vtk.vtkVolume()
volume.SetMapper(volumeMapper)
volume.SetProperty(volumeProperty)
# Finally, add the volume to the renderer
ren.AddViewProp(volume)
# Set up an initial view of the volume. The focal point will be the
# center of the volume, and the camera position will be 400mm to the
# patient's left (which is our right).
camera = ren.GetActiveCamera()
c = volume.GetCenter()
camera.SetFocalPoint(c[0], c[1], c[2])
camera.SetPosition(c[0] + 400, c[1], c[2])
camera.SetViewUp(0, 0, -1)
# Increase the size of the render window
renWin.SetSize(640, 480)
# Interact with the data.
iren.Initialize()
renWin.Render()
iren.Start()
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Examples/Medical/Python/Medical4.py
|
Python
|
bsd-3-clause
| 4,778
|
[
"VTK"
] |
a051f5d56b0c931bb8f48e861abd46078daa480f39a0f6d2bcae36c79a9f6e5f
|
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
import calendar
from collections import deque, namedtuple
from datetime import datetime
from google.protobuf.descriptor import FieldDescriptor
import pytz
from jormungandr.timezone import get_timezone
from navitiacommon import response_pb2, type_pb2
from builtins import range, zip
from importlib import import_module
import logging
from jormungandr.exceptions import ConfigException, UnableToParse, InvalidArguments
from six.moves.urllib.parse import urlparse
from jormungandr import new_relic
from six.moves import range
from six.moves import zip
from jormungandr.exceptions import TechnicalError
from flask import request
import re
import flask
from contextlib import contextmanager
import functools
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
DATETIME_FORMAT = "%Y%m%dT%H%M%S"
def get_uri_pt_object(pt_object):
if pt_object.embedded_type == type_pb2.ADDRESS:
coords = pt_object.uri.split(';')
return "coord:{}:{}".format(coords[0], coords[1])
return pt_object.uri
def kilometers_to_meters(distance):
return distance * 1000.0
def is_coord(uri):
# for the moment we do a simple check
return get_lon_lat(uri) != (None, None)
def get_lon_lat(uri):
"""
extract lon lat from an uri
    the uri should be formatted as: 'lon;lat'
>>> get_lon_lat('12.3;-5.3')
(12.3, -5.3)
>>> get_lon_lat('bob')
(None, None)
>>> get_lon_lat('5.3;bob')
(None, None)
>>> get_lon_lat('5.0;0.0')
(5.0, 0.0)
"""
if not uri:
return None, None
if uri.count(';') == 1:
try:
lon, lat = uri.split(';')
# we check that both are float
return float(lon), float(lat)
except ValueError:
return None, None
return None, None
def is_url(url):
if not url or url.strip() == '':
return False
url_parsed = urlparse(url)
return url_parsed.scheme.strip() != '' and url_parsed.netloc.strip() != ''
def str_to_time_stamp(str):
"""
convert a string to a posix timestamp
the string must be in the YYYYMMDDTHHMMSS format
    like 20170530T124500
"""
date = datetime.strptime(str, DATETIME_FORMAT)
return date_to_timestamp(date)
def str_to_dt(str):
"""
convert a string to a datetime
the string must be in the YYYYMMDDTHHMMSS format
    like 20170530T124500
"""
return datetime.strptime(str, DATETIME_FORMAT)
def date_to_timestamp(date):
"""
    convert a datetime object to a posix timestamp (number of seconds since 1970/1/1)
"""
return int(calendar.timegm(date.utctimetuple()))
def str_datetime_utc_to_local(dt, timezone):
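    """
    Convert a UTC datetime string (or the current UTC time when ``dt`` is
    empty) into a formatted local-time string for the given ``timezone`` name.
    (Docstring added for clarity; behaviour inferred from the implementation.)
    """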
from jormungandr.interfaces.parsers import DateTimeFormat
if dt:
utc_dt = DateTimeFormat()(dt)
else:
utc_dt = datetime.utcnow()
local = pytz.timezone(timezone)
return dt_to_str(utc_dt.replace(tzinfo=pytz.UTC).astimezone(local))
def timestamp_to_datetime(timestamp, tz=None):
"""
Convert a timestamp to datetime
if timestamp > MAX_INT we return None
"""
maxint = 9223372036854775807
# when a date is > 2038-01-19 03:14:07
# we receive a timestamp = 18446744071562142720 (64 bits) > 9223372036854775807 (MAX_INT 32 bits)
# And ValueError: timestamp out of range for platform time_t is raised
if timestamp >= maxint:
return None
dt = datetime.utcfromtimestamp(timestamp)
timezone = tz or get_timezone()
if timezone:
dt = pytz.utc.localize(dt)
return dt.astimezone(timezone)
return None
def dt_to_str(dt):
return dt.strftime(DATETIME_FORMAT)
def timestamp_to_str(timestamp):
dt = timestamp_to_datetime(timestamp)
if dt:
return dt_to_str(dt)
return None
def walk_dict(tree, visitor):
"""
depth first search on a dict.
    call visitor(name, value) for each node
if the visitor returns True, stop the search
>>> bob = {'tutu': 1,
... 'tata': [1, 2],
... 'toto': {'bob':12, 'bobette': 13, 'nested_bob': {'bob': 3}},
... 'tete': ('tuple1', ['ltuple1', 'ltuple2']),
... 'titi': [{'a':1}, {'b':1}]}
>>> def my_visitor(name, val):
... print("{}={}".format(name, val))
>>> walk_dict(bob, my_visitor)
titi={u'b': 1}
b=1
titi={u'a': 1}
a=1
tete=ltuple2
tete=ltuple1
tete=tuple1
tutu=1
toto={u'bobette': 13, u'bob': 12, u'nested_bob': {u'bob': 3}}
nested_bob={u'bob': 3}
bob=3
bob=12
bobette=13
tata=2
tata=1
>>> def my_stoper_visitor(name, val):
... print("{}={}".format(name, val))
... if name == 'tete':
... return True
>>> walk_dict(bob, my_stoper_visitor)
titi={u'b': 1}
b=1
titi={u'a': 1}
a=1
tete=ltuple2
"""
queue = deque()
def add_elt(name, elt, first=False):
if isinstance(elt, (list, tuple)):
for val in elt:
queue.append((name, val))
elif hasattr(elt, 'items'):
for k, v in elt.items():
queue.append((k, v))
        elif first:  # for the first elt, we add it even if it is not a collection
queue.append((name, elt))
add_elt("main", tree, first=True)
while queue:
elem = queue.pop()
#we don't want to visit the list, we'll visit each node separately
if not isinstance(elem[1], (list, tuple)):
if visitor(elem[0], elem[1]) is True:
#we stop the search if the visitor returns True
break
#for list and tuple, the name is the parent's name
add_elt(elem[0], elem[1])
def walk_protobuf(pb_object, visitor):
"""
Walk on a protobuf and call the visitor for each nodes
>>> journeys = response_pb2.Response()
>>> journey_standard = journeys.journeys.add()
>>> journey_standard.type = "none"
>>> journey_standard.duration = 1
>>> journey_standard.nb_transfers = 2
>>> s = journey_standard.sections.add()
>>> s.duration = 3
>>> s = journey_standard.sections.add()
>>> s.duration = 4
>>> journey_rapid = journeys.journeys.add()
>>> journey_rapid.duration = 5
>>> journey_rapid.nb_transfers = 6
>>> s = journey_rapid.sections.add()
>>> s.duration = 7
>>>
>>> from collections import defaultdict
>>> types_counter = defaultdict(int)
>>> def visitor(name, val):
... types_counter[type(val)] +=1
>>>
>>> walk_protobuf(journeys, visitor)
>>> types_counter[response_pb2.Response]
1
>>> types_counter[response_pb2.Journey]
2
>>> types_counter[response_pb2.Section]
3
>>> types_counter[int] # and 7 int in all
7
"""
queue = deque()
def add_elt(name, elt):
try:
fields = elt.ListFields()
except AttributeError:
return
for field, value in fields:
if field.label == FieldDescriptor.LABEL_REPEATED:
for v in value:
queue.append((field.name, v))
else:
queue.append((field.name, value))
# add_elt("main", pb_object)
queue.append(('main', pb_object))
while queue:
elem = queue.pop()
visitor(elem[0], elem[1])
add_elt(elem[0], elem[1])
def realtime_level_to_pbf(level):
if level == 'base_schedule':
return type_pb2.BASE_SCHEDULE
elif level == 'adapted_schedule':
return type_pb2.ADAPTED_SCHEDULE
elif level == 'realtime':
return type_pb2.REALTIME
else:
raise ValueError('Impossible to convert in pbf')
#we can't use reversed(enumerate(list)) without creating a temporary
#list, so we define our own reverse enumerate
def reverse_enumerate(l):
return zip(xrange(len(l)-1, -1, -1), reversed(l))
def pb_del_if(l, pred):
'''
Delete the elements such as pred(e) is true in a protobuf list.
Return the number of elements deleted.
'''
nb = 0
for i, e in reverse_enumerate(l):
if pred(e):
del l[i]
nb += 1
return nb
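# Illustrative sketch (not part of the original code): pb_del_if only relies on
# len(), indexing and 'del', so a plain python list behaves the same way as a
# protobuf repeated field. The names below are made up for the example.
def _example_pb_del_if():
    values = [1, 2, 3, 4, 5, 6]
    removed = pb_del_if(values, lambda v: v % 2 == 0)
    # removed == 3 and values == [1, 3, 5]
    return removed, values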
def create_object(configuration):
"""
Create an object from a dict
The dict must contains a 'class' key with the class path of the class we want to create
It can contains also an 'args' key with a dictionary of arguments to pass to the constructor
"""
class_path = configuration['class']
kwargs = configuration.get('args', {})
log = logging.getLogger(__name__)
try:
if '.' not in class_path:
            log.warn('impossible to build object {}, wrongly formatted class'.format(class_path))
raise ConfigException(class_path)
module_path, name = class_path.rsplit('.', 1)
module = import_module(module_path)
attr = getattr(module, name)
except AttributeError as e:
log.warn('impossible to build object {} : {}'.format(class_path, e))
raise ConfigException(class_path)
except ImportError:
log.exception('impossible to build object {}, cannot find class'.format(class_path))
raise ConfigException(class_path)
try:
        obj = attr(**kwargs)  # call to the constructor, with all the args
except TypeError as e:
log.warn('impossible to build object {}, wrong arguments: {}'.format(class_path, e.message))
raise ConfigException(class_path)
return obj
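# Illustrative sketch (not part of the original code): the configuration dict only
# needs a dotted 'class' path and an optional 'args' dict. 'collections.OrderedDict'
# is just a stand-in class path picked for the example.
def _example_create_object():
    configuration = {'class': 'collections.OrderedDict', 'args': {}}
    return create_object(configuration)  # an empty OrderedDict instance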
def generate_id():
import uuid
return uuid.uuid4()
def get_pt_object_coord(pt_object):
"""
Given a PtObject, return the coord according to its embedded_type
:param pt_object: type_pb2.PtObject
:return: coord: type_pb2.GeographicalCoord
>>> pt_object = type_pb2.PtObject()
>>> pt_object.embedded_type = type_pb2.POI
>>> pt_object.poi.coord.lon = 42.42
>>> pt_object.poi.coord.lat = 41.41
>>> coord = get_pt_object_coord(pt_object)
>>> coord.lon
42.42
>>> coord.lat
41.41
"""
if not isinstance(pt_object, type_pb2.PtObject):
logging.getLogger(__name__).error('Invalid pt_object')
raise InvalidArguments('Invalid pt_object')
map_coord = {
type_pb2.STOP_POINT: "stop_point",
type_pb2.STOP_AREA: "stop_area",
type_pb2.ADDRESS: "address",
type_pb2.ADMINISTRATIVE_REGION: "administrative_region",
type_pb2.POI: "poi"
}
attr = getattr(pt_object,
map_coord.get(pt_object.embedded_type, ""),
None)
coord = getattr(attr, "coord", None)
if not coord:
logging.getLogger(__name__).error('Invalid coord for ptobject type: {}'.format(pt_object.embedded_type))
raise UnableToParse('Invalid coord for ptobject type: {}'.format(pt_object.embedded_type))
return coord
def record_external_failure(message, connector_type, connector_name):
params = {'{}_system_id'.format(connector_type): unicode(connector_name), 'message': message}
new_relic.record_custom_event('{}_external_failure'.format(connector_type), params)
def decode_polyline(encoded, precision=6):
'''
Version of : https://developers.google.com/maps/documentation/utilities/polylinealgorithm
But with improved precision
See: https://mapzen.com/documentation/mobility/decoding/#python (valhalla)
http://developers.geovelo.fr/#/documentation/compute (geovelo)
'''
inv = 10**-precision
decoded = []
previous = [0, 0]
i = 0
#for each byte
while i < len(encoded):
#for each coord (lat, lon)
ll = [0, 0]
for j in [0, 1]:
shift = 0
byte = 0x20
#keep decoding bytes until you have this coord
while byte >= 0x20:
byte = ord(encoded[i]) - 63
i += 1
ll[j] |= (byte & 0x1f) << shift
shift += 5
#get the final value adding the previous offset and remember it for the next
ll[j] = previous[j] + (~(ll[j] >> 1) if ll[j] & 1 else (ll[j] >> 1))
previous[j] = ll[j]
        #scale by the precision and chop off long coords; also flip the positions so
        #it's the far more standard lon,lat instead of lat,lon
decoded.append([float('%.6f' % (ll[1] * inv)), float('%.6f' % (ll[0] * inv))])
#hand back the list of coordinates
return decoded
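# Illustrative sketch (not part of the original code): decoding the classic example
# polyline from Google's documentation, which is encoded with precision=5. The
# values in the comment are approximate and only given for orientation.
def _example_decode_polyline():
    encoded = '_p~iF~ps|U_ulLnnqC_mqNvxq`@'
    coords = decode_polyline(encoded, precision=5)
    # should yield roughly [[-120.2, 38.5], [-120.95, 40.7], [-126.453, 43.252]]
    # note the lon,lat order produced by the function
    return coords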
# PeriodExtremity is used to provide a datetime and its meaning
# datetime: given datetime (obviously)
# represents_start: is True if it's start of period, False if it's the end of period
# (mostly used for fallback management in experimental scenario)
PeriodExtremity = namedtuple('PeriodExtremity', ['datetime', 'represents_start'])
class SectionSorter(object):
def __call__(self, a, b):
if a.begin_date_time != b.begin_date_time:
return -1 if a.begin_date_time < b.begin_date_time else 1
else:
return -1 if a.end_date_time < b.end_date_time else 1
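# Illustrative sketch (not part of the original code): SectionSorter is an old-style
# cmp comparator, so it has to go through functools.cmp_to_key when used with
# sorted(). 'sections' stands for any iterable of pb sections with begin/end times.
def _example_sort_sections(sections):
    import functools
    return sorted(sections, key=functools.cmp_to_key(SectionSorter()))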
def make_namedtuple(typename, *fields, **fields_with_default):
"""
helper to create a named tuple with some default values
:param typename: name of the type
:param fields: required argument of the named tuple
:param fields_with_default: positional arguments with fields and their default value
:return: the namedtuple
>>> Bob = make_namedtuple('Bob', 'a', 'b', c=2, d=14)
>>> Bob(b=14, a=12)
Bob(a=12, b=14, c=2, d=14)
>>> Bob(14, 12) # non named argument also works
Bob(a=14, b=12, c=2, d=14)
>>> Bob(12, b=14, d=123)
Bob(a=12, b=14, c=2, d=123)
>>> Bob(a=12) # Note: the error message is not the same in python 3 (they are better in python 3)
Traceback (most recent call last):
TypeError: __new__() takes at least 3 arguments (2 given)
>>> Bob()
Traceback (most recent call last):
TypeError: __new__() takes at least 3 arguments (1 given)
"""
import collections
field_names = list(fields) + list(fields_with_default.keys())
T = collections.namedtuple(typename, field_names)
T.__new__.__defaults__ = tuple(fields_with_default.values())
return T
def get_timezone_str(default='Africa/Abidjan'):
try:
timezone = get_timezone()
except TechnicalError:
return default
else:
return timezone.zone if timezone else default
def get_current_datetime_str(is_utc=False):
timezone = 'Africa/Abidjan' if is_utc else get_timezone_str()
current_datetime = request.args.get('_current_datetime')
return str_datetime_utc_to_local(current_datetime, timezone)
def make_timestamp_from_str(strftime):
"""
:param strftime:
:return: double
>>> make_timestamp_from_str("2017-12-25T08:07:59 +01:00")
1514185679
>>> make_timestamp_from_str("20171225T080759+01:00")
1514185679
>>> make_timestamp_from_str("2017-12-25 08:07:59 +01:00")
1514185679
>>> make_timestamp_from_str("20171225T080759Z")
1514189279
"""
from dateutil import parser
import calendar
return calendar.timegm(parser.parse(strftime).utctimetuple())
def get_house_number(housenumber):
hn = 0
numbers = re.findall(r'^\d+', housenumber or "0")
if len(numbers) > 0:
hn = numbers[0]
return int(hn)
# The two following functions allow to use flask request context in greenlet
# The decorator provided by flask (@copy_current_request_context) will generate an assertion error with multiple greenlets
def copy_flask_request_context():
"""
    Make a copy of the 'main' flask request context to be used with the context manager below
:return: a copy of the current flask request context
"""
# Copy flask request context to be used in greenlet
top = flask._request_ctx_stack.top
if top is None:
raise RuntimeError('This function can only be used at local scopes '
'when a request context is on the stack. For instance within '
'view functions.')
return top.copy()
@contextmanager
def copy_context_in_greenlet_stack(request_context):
"""
Push a copy of the 'main' flask request context in a global stack created for it.
Pop the copied request context to discard it
ex:
request_context = utils.copy_flask_request_context()
def worker():
with utils.copy_context_in_greenlet_stack(request_context):
# do some work here with flask request context available
gevent.spawn(worker) # Multiples times
:param request_context: a copy of the 'main' flask request context
"""
flask.globals._request_ctx_stack.push(request_context)
yield
flask.globals._request_ctx_stack.pop()
def compose(*funs):
"""
compose functions and return a callable object
example 1:
f(x) = x + 1
g(x) = 2*x
compose(f,g) = g(f(x)) = 2 * (x + 1 )
example 2:
f(a list of integer): returns multiples of 3
g(a list of integer): returns multiples of 5
compose(f,g): returns multiples of 3 AND 5
:param funs:
:return: a lambda
>>> c = compose(lambda x: x+1, lambda x: 2*x)
>>> c(42)
86
>>> f = lambda l: (x for x in l if x%3 == 0)
>>> g = lambda l: (x for x in l if x%5 == 0)
>>> c = compose(f, g)
>>> list(c(range(45)))
[0, 15, 30]
"""
return lambda obj: functools.reduce(lambda prev, f: f(prev), funs, obj)
class ComposedFilter(object):
"""
Compose several filters with convenient interfaces
All filters are evaluated lazily
>>> F = ComposedFilter()
>>> f = F.add_filter(lambda x: x % 2 == 0).add_filter(lambda x: x % 5 == 0).compose_filters()
>>> list(f(range(40)))
[0, 10, 20, 30]
>>> list(f(range(20))) # we can reuse the composed filter
[0, 10]
>>> f = F.add_filter(lambda x: x % 3 == 0).compose_filters() # we can continue on adding new filter
>>> list(f(range(40)))
[0, 30]
"""
def __init__(self):
self.filters = []
def add_filter(self, pred):
self.filters.append(lambda iterable: (i for i in iterable if pred(i)))
return self
def compose_filters(self):
return compose(*self.filters)
def portable_min(*args, **kwargs):
"""
a portable min() for python2 which takes a default value when
the iterable is empty
>>> portable_min([1], default=42)
1
>>> portable_min([], default=42)
42
>>> portable_min(iter(()), default=43) # empty iterable
43
"""
if PY2:
default = kwargs.pop('default', None)
try:
return min(*args, **kwargs)
except ValueError:
return default
except Exception:
raise
if PY3:
return min(*args, **kwargs)
|
Tisseo/navitia
|
source/jormungandr/jormungandr/utils.py
|
Python
|
agpl-3.0
| 19,960
|
[
"VisIt"
] |
264eb9ad853c0291078c959178ecc49e484142cfca2f3760fe186fe470f7192c
|
"""
Tests for miscellaneous utilities.
"""
import cPickle
import gzip
import numpy as np
import shutil
import tempfile
import unittest
from rdkit import Chem
from rdkit.Chem import AllChem
from vs_utils.utils import (DatasetSharder, pad_array, read_pickle,
ScaffoldGenerator, SmilesGenerator, SmilesMap,
write_pickle)
from vs_utils.utils.rdkit_utils import conformers, serial
class TestDatasetSharder(unittest.TestCase):
"""
Test DatasetSharder.
"""
def setUp(self):
"""
Set up tests.
"""
self.reader = serial.MolReader()
# generate molecules
smiles = ['CC(=O)OC1=CC=CC=C1C(=O)O', 'CC(C)CC1=CC=C(C=C1)C(C)C(=O)O',
'CC1=CC=C(C=C1)C2=CC(=NN2C3=CC=C(C=C3)S(=O)(=O)N)C(F)(F)F']
names = ['aspirin', 'ibuprofen', 'celecoxib']
self.mols = []
for s, n in zip(smiles, names):
mol = Chem.MolFromSmiles(s)
mol.SetProp('_Name', n)
AllChem.Compute2DCoords(mol)
self.mols.append(mol)
# write molecules to file
self.temp_dir = tempfile.mkdtemp()
writer = serial.MolWriter()
_, self.filename = tempfile.mkstemp(dir=self.temp_dir,
suffix='.sdf.gz')
with writer.open(self.filename) as w:
w.write(self.mols)
self.sharder = DatasetSharder(filename=self.filename,
write_shards=False)
self.reader = serial.MolReader()
def tearDown(self):
"""
Clean up tests.
"""
shutil.rmtree(self.temp_dir)
def compare_mols(self, mols, ref_slice=None):
"""
Compare sharded molecules with original molecules.
Parameters
----------
mols : iterable
Molecules to compare to reference molecules.
ref_slice : slice, optional
Slice of self.mols to compare with sharded molecules.
"""
ref_mols = self.mols
if ref_slice is not None:
ref_mols = self.mols[ref_slice]
assert len(mols) == len(ref_mols)
for a, b in zip(mols, ref_mols):
assert Chem.MolToSmiles(a) == Chem.MolToSmiles(b)
assert a.GetProp('_Name') == b.GetProp('_Name')
def test_shard(self):
"""
Test DatasetSharder.shard.
"""
shards = list(self.sharder)
assert len(shards) == 1
self.compare_mols(shards[0])
def test_leftover(self):
"""
        Test sharding when total % shard_size != 0.
"""
self.sharder.shard_size = 2
shards = list(self.sharder)
assert len(shards) == 2
assert len(shards[0]) == 2
self.compare_mols(shards[0], slice(2))
assert len(shards[1]) == 1
self.compare_mols(shards[1], slice(2, 3))
def test_next_filename(self):
"""
Test DatasetSharder.next_filename.
"""
self.sharder.prefix = 'foo'
self.sharder.flavor = 'bar'
self.sharder.index = 5
for i in xrange(10):
assert self.sharder._next_filename() == 'foo-{}.bar'.format(i + 5)
def test_write_shards(self):
"""
Test DatasetSharder.write_shard.
"""
_, prefix = tempfile.mkstemp(dir=self.temp_dir)
self.sharder.prefix = prefix
self.sharder.write_shards = True
self.sharder.flavor = 'sdf.gz'
self.sharder.shard()
mols = list(self.reader.open('{}-0.sdf.gz'.format(prefix)))
self.compare_mols(mols)
def test_preserve_mol_properties_when_pickling(self):
"""
Test preservation of molecule properties when pickling.
"""
_, prefix = tempfile.mkstemp(dir=self.temp_dir)
self.sharder.prefix = prefix
self.sharder.write_shards = True
self.sharder.shard()
mols = list(self.reader.open('{}-0.pkl.gz'.format(prefix)))
self.compare_mols(mols)
def test_guess_prefix(self):
"""
Test guess_prefix.
"""
self.sharder = DatasetSharder(filename='../foo.bar.gz')
assert self.sharder.prefix == 'foo'
class TestMiscUtils(unittest.TestCase):
"""
Tests for miscellaneous utilities.
"""
def setUp(self):
"""
Set up tests.
"""
self.temp_dir = tempfile.mkdtemp()
def tearDown(self):
"""
Clean up tests.
"""
shutil.rmtree(self.temp_dir)
def test_pad_matrix(self):
"""
Test pad_matrix.
"""
x = np.random.random((5, 6))
assert pad_array(x, (10, 12)).shape == (10, 12)
assert pad_array(x, 10).shape == (10, 10)
def test_read_pickle(self):
"""
Test read_pickle.
"""
_, filename = tempfile.mkstemp(dir=self.temp_dir, suffix='.pkl')
with open(filename, 'wb') as f:
cPickle.dump({'foo': 'bar'}, f, cPickle.HIGHEST_PROTOCOL)
assert read_pickle(filename)['foo'] == 'bar'
def test_read_pickle_gz(self):
"""
Test read_pickle with gzipped pickle.
"""
_, filename = tempfile.mkstemp(dir=self.temp_dir, suffix='.pkl.gz')
with gzip.open(filename, 'wb') as f:
cPickle.dump({'foo': 'bar'}, f, cPickle.HIGHEST_PROTOCOL)
assert read_pickle(filename)['foo'] == 'bar'
def test_write_pickle(self):
"""
Test write_pickle.
"""
_, filename = tempfile.mkstemp(dir=self.temp_dir, suffix='.pkl')
write_pickle({'foo': 'bar'}, filename)
with open(filename) as f:
assert cPickle.load(f)['foo'] == 'bar'
def test_write_pickle_gz(self):
"""
Test write_pickle with gzipped pickle.
"""
_, filename = tempfile.mkstemp(dir=self.temp_dir, suffix='.pkl.gz')
write_pickle({'foo': 'bar'}, filename)
with gzip.open(filename) as f:
assert cPickle.load(f)['foo'] == 'bar'
class SmilesTests(unittest.TestCase):
def setUp(self):
"""
Set up tests.
"""
smiles = ['CC(=O)OC1=CC=CC=C1C(=O)O', 'CC(C)CC1=CC=C(C=C1)C(C)C(=O)O',
'CC1=CC=C(C=C1)C2=CC(=NN2C3=CC=C(C=C3)S(=O)(=O)N)C(F)(F)F']
names = ['aspirin', 'ibuprofen', 'celecoxib']
self.cids = [2244, 3672, 2662]
self.mols = []
for s, n in zip(smiles, names):
mol = Chem.MolFromSmiles(s)
mol.SetProp('_Name', n)
self.mols.append(mol)
class TestSmilesGenerator(SmilesTests):
"""
Test SmilesGenerator.
"""
def setUp(self):
"""
Set up tests.
"""
super(TestSmilesGenerator, self).setUp()
self.engine = SmilesGenerator()
def test_get_smiles(self):
"""
Test SmilesGenerator.get_smiles.
"""
for mol in self.mols:
smiles = self.engine.get_smiles(mol)
new = Chem.MolFromSmiles(smiles)
assert new.GetNumAtoms() == mol.GetNumAtoms()
def test_get_smiles_3d(self):
"""
Test SmilesGenerator.get_smiles with stereochemistry assigned from 3D
coordinates.
"""
# generate conformers for ibuprofen
engine = conformers.ConformerGenerator()
mol = engine.generate_conformers(self.mols[1])
assert mol.GetNumConformers() > 0
# check that chirality has not yet been assigned
smiles = self.engine.get_smiles(mol)
assert '@' not in smiles # check for absence of chirality marker
chiral_types = [Chem.ChiralType.CHI_TETRAHEDRAL_CW,
Chem.ChiralType.CHI_TETRAHEDRAL_CCW]
chiral = False
for atom in mol.GetAtoms():
if atom.GetChiralTag() in chiral_types:
chiral = True
assert not chiral
# generate SMILES
self.engine = SmilesGenerator(assign_stereo_from_3d=True)
smiles = self.engine.get_smiles(mol)
assert '@' in smiles # check for chirality marker
new = Chem.MolFromSmiles(smiles)
assert new.GetNumAtoms() == self.mols[1].GetNumAtoms()
# check that chirality was assigned to ibuprofen
chiral = False
for atom in mol.GetAtoms():
if atom.GetChiralTag() in chiral_types:
chiral = True
assert chiral
class TestSmilesMap(SmilesTests):
"""
Test SmilesMap.
"""
def setUp(self):
"""
Set up tests.
"""
super(TestSmilesMap, self).setUp()
self.map = SmilesMap()
def test_add_mol(self):
"""
Test SmilesMap.add_mol.
"""
for mol in self.mols:
self.map.add_mol(mol)
smiles_map = self.map.get_map()
for mol in self.mols:
assert smiles_map[mol.GetProp('_Name')] == Chem.MolToSmiles(
mol, isomericSmiles=True)
def test_add_bare_id(self):
"""
Test failure when adding bare IDs.
"""
for mol, cid in zip(self.mols, self.cids):
mol.SetProp('_Name', str(cid))
try:
for mol in self.mols:
self.map.add_mol(mol)
raise AssertionError
except TypeError:
pass
def test_add_bare_id_with_prefix(self):
"""
Test success when adding bare IDs with a prefix set.
"""
self.map = SmilesMap('CID')
for mol, cid in zip(self.mols, self.cids):
mol.SetProp('_Name', str(cid))
for mol in self.mols:
self.map.add_mol(mol)
smiles_map = self.map.get_map()
for mol in self.mols:
assert (smiles_map['CID{}'.format(mol.GetProp('_Name'))] ==
Chem.MolToSmiles(mol, isomericSmiles=True))
def test_fail_on_duplicate_id(self):
"""
Test failure when adding a duplicate ID with a different SMILES string.
"""
new = Chem.Mol(self.mols[0])
new.SetProp('_Name', 'celecoxib')
self.mols.append(new)
try:
for mol in self.mols:
self.map.add_mol(mol)
raise AssertionError
except ValueError:
pass
def test_fail_on_duplicate_smiles(self):
"""
Test failure when adding a duplicate SMILES with a different ID.
"""
self.map = SmilesMap(allow_duplicates=False)
new = Chem.Mol(self.mols[0])
new.SetProp('_Name', 'fakedrug')
self.mols.append(new)
try:
for mol in self.mols:
self.map.add_mol(mol)
raise AssertionError
except ValueError:
pass
class TestScaffoldGenerator(unittest.TestCase):
"""
Test ScaffoldGenerator.
"""
def setUp(self):
"""
Set up tests.
"""
smiles = ['CC(=O)OC1=CC=CC=C1C(=O)O',
'CN1C=C(C2=CC=CC=C21)C(=O)[C@@H]3CCC4=C(C3)NC=N4']
names = ['aspirin', 'ramosetron']
self.mols = []
for this_smiles, name in zip(smiles, names):
mol = Chem.MolFromSmiles(this_smiles)
mol.SetProp('_Name', name)
self.mols.append(mol)
self.engine = ScaffoldGenerator()
def test_scaffolds(self):
"""
Test scaffold generation.
"""
scaffolds = [self.engine.get_scaffold(mol) for mol in self.mols]
scaffold_mols = [Chem.MolFromSmiles(scaffold)
for scaffold in scaffolds]
for mol, ref_mol in zip(scaffold_mols, self.mols):
assert mol.GetNumAtoms() < ref_mol.GetNumAtoms()
assert scaffold_mols[0].GetNumAtoms() == 6
assert scaffold_mols[1].GetNumAtoms() == 20
def test_chiral_scaffolds(self):
"""
Test chiral scaffold generation.
"""
achiral_scaffold = self.engine.get_scaffold(self.mols[1])
self.engine = ScaffoldGenerator(include_chirality=True)
chiral_scaffold = self.engine.get_scaffold(self.mols[1])
assert '@' not in achiral_scaffold
assert '@' in chiral_scaffold
assert (Chem.MolFromSmiles(achiral_scaffold).GetNumAtoms() ==
Chem.MolFromSmiles(chiral_scaffold).GetNumAtoms())
|
rbharath/vs-utils
|
vs_utils/utils/tests/test_utils.py
|
Python
|
gpl-3.0
| 12,316
|
[
"RDKit"
] |
cd9122a53d4b36a3e523f4c653f9a52567f55f32cc4ea366ee329fda9383fb67
|
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import sys
from functools import partial
from PyQt4.Qt import (
QMainWindow, Qt, QApplication, pyqtSignal, QLabel, QIcon, QFormLayout,
QDialog, QSpinBox, QCheckBox, QDialogButtonBox, QToolButton, QMenu, QInputDialog)
from calibre.gui2 import error_dialog
from calibre.gui2.tweak_book import actions
from calibre.gui2.tweak_book.editor.canvas import Canvas
class ResizeDialog(QDialog): # {{{
def __init__(self, width, height, parent=None):
QDialog.__init__(self, parent)
self.l = l = QFormLayout(self)
self.setLayout(l)
self.aspect_ratio = width / float(height)
l.addRow(QLabel(_('Choose the new width and height')))
self._width = w = QSpinBox(self)
w.setMinimum(1)
w.setMaximum(10 * width)
w.setValue(width)
w.setSuffix(' px')
l.addRow(_('&Width:'), w)
self._height = h = QSpinBox(self)
h.setMinimum(1)
h.setMaximum(10 * height)
h.setValue(height)
h.setSuffix(' px')
l.addRow(_('&Height:'), h)
w.valueChanged.connect(partial(self.keep_ar, 'width'))
h.valueChanged.connect(partial(self.keep_ar, 'height'))
self.ar = ar = QCheckBox(_('Keep &aspect ratio'))
ar.setChecked(True)
l.addRow(ar)
self.resize(self.sizeHint())
self.bb = bb = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
bb.accepted.connect(self.accept)
bb.rejected.connect(self.reject)
l.addRow(bb)
def keep_ar(self, which):
if self.ar.isChecked():
val = getattr(self, which)
oval = val / self.aspect_ratio if which == 'width' else val * self.aspect_ratio
other = getattr(self, '_height' if which == 'width' else '_width')
other.blockSignals(True)
other.setValue(oval)
other.blockSignals(False)
@dynamic_property
def width(self):
def fget(self):
return self._width.value()
def fset(self, val):
self._width.setValue(val)
return property(fget=fget, fset=fset)
@dynamic_property
def height(self):
def fget(self):
return self._height.value()
def fset(self, val):
self._height.setValue(val)
return property(fget=fget, fset=fset)
# }}}
class Editor(QMainWindow):
has_line_numbers = False
modification_state_changed = pyqtSignal(object)
undo_redo_state_changed = pyqtSignal(object, object)
data_changed = pyqtSignal(object)
cursor_position_changed = pyqtSignal() # dummy
copy_available_state_changed = pyqtSignal(object)
def __init__(self, syntax, parent=None):
QMainWindow.__init__(self, parent)
if parent is None:
self.setWindowFlags(Qt.Widget)
self.is_synced_to_container = False
self.syntax = syntax
self._is_modified = False
self.copy_available = self.cut_available = False
self.quality = 90
self.canvas = Canvas(self)
self.setCentralWidget(self.canvas)
self.create_toolbars()
self.canvas.image_changed.connect(self.image_changed)
self.canvas.undo_redo_state_changed.connect(self.undo_redo_state_changed)
self.canvas.selection_state_changed.connect(self.update_clipboard_actions)
@dynamic_property
def is_modified(self):
def fget(self):
return self._is_modified
def fset(self, val):
self._is_modified = val
self.modification_state_changed.emit(val)
return property(fget=fget, fset=fset)
@property
def undo_available(self):
return self.canvas.undo_action.isEnabled()
@property
def redo_available(self):
return self.canvas.redo_action.isEnabled()
@dynamic_property
def current_line(self):
def fget(self):
return 0
def fset(self, val):
pass
return property(fget=fget, fset=fset)
@property
def number_of_lines(self):
return 0
def pretty_print(self, name):
return False
def get_raw_data(self):
return self.canvas.get_image_data(quality=self.quality)
@dynamic_property
def data(self):
def fget(self):
return self.get_raw_data()
def fset(self, val):
self.canvas.load_image(val)
return property(fget=fget, fset=fset)
def replace_data(self, raw, only_if_different=True):
# We ignore only_if_different as it is useless in our case, and
# there is no easy way to check two images for equality
self.data = raw
def apply_settings(self, prefs=None, dictionaries_changed=False):
pass
def go_to_line(self, *args, **kwargs):
pass
def set_focus(self):
self.canvas.setFocus(Qt.OtherFocusReason)
def undo(self):
self.canvas.undo_action.trigger()
def redo(self):
self.canvas.redo_action.trigger()
def copy(self):
self.canvas.copy()
def cut(self):
return error_dialog(self, _('Not allowed'), _(
'Cutting of images is not allowed. If you want to delete the image, use'
' the files browser to do it.'), show=True)
def paste(self):
self.canvas.paste()
# Search and replace {{{
def mark_selected_text(self, *args, **kwargs):
pass
def find(self, *args, **kwargs):
return False
def replace(self, *args, **kwargs):
return False
def all_in_marked(self, *args, **kwargs):
return 0
@property
def selected_text(self):
return ''
# }}}
def image_changed(self, new_image):
self.is_synced_to_container = False
self._is_modified = True
self.copy_available = self.canvas.is_valid
self.copy_available_state_changed.emit(self.copy_available)
self.data_changed.emit(self)
self.modification_state_changed.emit(True)
self.fmt_label.setText(' ' + (self.canvas.original_image_format or '').upper())
im = self.canvas.current_image
self.size_label.setText('{0} x {1}{2}'.format(im.width(), im.height(), ' px'))
def break_cycles(self):
self.canvas.break_cycles()
self.canvas.image_changed.disconnect()
self.canvas.undo_redo_state_changed.disconnect()
self.canvas.selection_state_changed.disconnect()
self.modification_state_changed.disconnect()
self.undo_redo_state_changed.disconnect()
self.data_changed.disconnect()
self.cursor_position_changed.disconnect()
self.copy_available_state_changed.disconnect()
def contextMenuEvent(self, ev):
ev.ignore()
def create_toolbars(self):
self.action_bar = b = self.addToolBar(_('File actions tool bar'))
b.setObjectName('action_bar') # Needed for saveState
for x in ('undo', 'redo'):
b.addAction(getattr(self.canvas, '%s_action' % x))
self.edit_bar = b = self.addToolBar(_('Edit actions tool bar'))
for x in ('copy', 'paste'):
ac = actions['editor-%s' % x]
setattr(self, 'action_' + x, b.addAction(ac.icon(), x, getattr(self, x)))
self.update_clipboard_actions()
b.addSeparator()
self.action_trim = ac = b.addAction(QIcon(I('trim.png')), _('Trim image'), self.canvas.trim_image)
self.action_rotate = ac = b.addAction(QIcon(I('rotate-right.png')), _('Rotate image'), self.canvas.rotate_image)
self.action_resize = ac = b.addAction(QIcon(I('resize.png')), _('Resize image'), self.resize_image)
b.addSeparator()
self.action_filters = ac = b.addAction(QIcon(I('filter.png')), _('Image filters'))
b.widgetForAction(ac).setPopupMode(QToolButton.InstantPopup)
self.filters_menu = m = QMenu()
ac.setMenu(m)
m.addAction(_('Auto-trim image'), self.canvas.autotrim_image)
m.addAction(_('Sharpen image'), self.sharpen_image)
m.addAction(_('Blur image'), self.blur_image)
m.addAction(_('De-speckle image'), self.canvas.despeckle_image)
self.info_bar = b = self.addToolBar(_('Image information bar'))
self.fmt_label = QLabel('')
b.addWidget(self.fmt_label)
b.addSeparator()
self.size_label = QLabel('')
b.addWidget(self.size_label)
def update_clipboard_actions(self, *args):
if self.canvas.has_selection:
self.action_copy.setText(_('Copy selected region'))
self.action_paste.setText(_('Paste into selected region'))
else:
self.action_copy.setText(_('Copy image'))
self.action_paste.setText(_('Paste image'))
def resize_image(self):
im = self.canvas.current_image
d = ResizeDialog(im.width(), im.height(), self)
if d.exec_() == d.Accepted:
self.canvas.resize_image(d.width, d.height)
def sharpen_image(self):
val, ok = QInputDialog.getInt(self, _('Sharpen image'), _(
'The standard deviation for the Gaussian sharpen operation (higher means more sharpening)'), value=3, min=1, max=20)
if ok:
self.canvas.sharpen_image(sigma=val)
def blur_image(self):
val, ok = QInputDialog.getInt(self, _('Blur image'), _(
'The standard deviation for the Gaussian blur operation (higher means more blurring)'), value=3, min=1, max=20)
if ok:
self.canvas.blur_image(sigma=val)
def launch_editor(path_to_edit, path_is_raw=False):
app = QApplication([])
if path_is_raw:
raw = path_to_edit
else:
with open(path_to_edit, 'rb') as f:
raw = f.read()
t = Editor('raster_image')
t.data = raw
t.show()
app.exec_()
if __name__ == '__main__':
launch_editor(sys.argv[-1])
|
palerdot/calibre
|
src/calibre/gui2/tweak_book/editor/image.py
|
Python
|
gpl-3.0
| 10,092
|
[
"Gaussian"
] |
e00ca7965efaa50ec3797f0dd70a42e8306aa1eed67b1f4d9d5bef72ee624c60
|
"""Print out the source text corresponding to AST nodes.
"""
import os
import astroid
import colorama
import inflection
from colorama import Back, Fore, Style
import python_ta.transforms.setendings as setendings
colorama.init(strip=False, autoreset=True)
def _wrap_color(code_string):
"""Wrap key parts in styling and resets.
    Styling for each key part, from
(col_offset, fromlineno) to (end_col_offset, end_lineno).
Note: use this to set color back to default (on mac, and others?):
Style.RESET_ALL + Style.DIM
"""
ret = Style.BRIGHT + Fore.WHITE + Back.BLACK
ret += code_string
ret += Style.RESET_ALL + Style.DIM + Fore.RESET + Back.RESET
return ret
def print_node(filename, node_class):
"""Print all nodes of the given class in the given file."""
with open(filename) as f:
content = f.read()
source_lines = content.split("\n")
module = astroid.parse(content)
# Set end_lineno and end_col_offset for all nodes in `module`.
ending_transformer = setendings.init_register_ending_setters(source_lines)
ending_transformer.visit(module)
for node in module.nodes_of_class(node_class):
if node.fromlineno == node.end_lineno:
line = source_lines[node.fromlineno - 1] # string
out = [
line[: node.col_offset]
+
# The key part:
_wrap_color(line[node.col_offset : node.end_col_offset])
+ line[node.end_col_offset :]
]
else:
first_line = source_lines[node.fromlineno - 1] # string
middle_lines = source_lines[node.fromlineno : node.end_lineno - 1] # list
last_line = source_lines[node.end_lineno - 1] # string
if middle_lines:
# For each item in the list of lines of strings,
# add colorama style to middle like the first and last lines
middle_lines = "\n".join(list(map(_wrap_color, middle_lines))) + "\n"
else:
middle_lines = "" # coerce list to string
if first_line: # Add a spacing after first_line
middle_lines = "\n" + middle_lines
out = [
first_line[: node.col_offset]
+
# The key part:
_wrap_color(first_line[node.col_offset :])
+ middle_lines
+ _wrap_color(last_line[: node.end_col_offset])
+ last_line[node.end_col_offset :]
]
print(Style.DIM + "\n".join(out))
if __name__ == "__main__":
for node_class in astroid.nodes.ALL_NODE_CLASSES:
print("=== {} ===".format(node_class.__name__))
file_location = "nodes/" + inflection.underscore(node_class.__name__) + ".py"
try:
print_node(file_location, node_class)
except FileNotFoundError:
print("WARNING: No file for class {}".format(node_class))
except AttributeError:
print("ERROR: for class {}".format(node_class))
print("")
|
pyta-uoft/pyta
|
sample_usage/print_nodes.py
|
Python
|
gpl-3.0
| 3,092
|
[
"VisIt"
] |
b55d4dce3ff79e6918b0e8eee251519fc5a12ce27c200c0bc20aeafb47b67cb3
|
import numpy
from .network import generate_full_rank_matrix
def unscaled_control_coefficients(stoichiometry, elasticity):
_, n = stoichiometry.shape
# do Gaussian elimination,
# and get reduced stoichiometry, kernel and link matrix
link_matrix, kernel_matrix, independent_list = generate_full_rank_matrix(stoichiometry)
reduced_matrix = numpy.take(stoichiometry, independent_list, 0)
    # construct Jacobian matrix from the reduced stoichiometry, link matrix and elasticities,
# M0 = N0 * epsilon * L
epsilon_L = elasticity @ link_matrix
jacobian = reduced_matrix @ epsilon_L
# calculate unscaled concentration control coefficients
# CS = -L * (M0)^(-1) * N0
inv_jacobian = numpy.linalg.inv(jacobian)
ccc = -link_matrix @ inv_jacobian
ccc = ccc @ reduced_matrix
# calculate unscaled flux control coefficients
    # CJ = I + epsilon * CS
fcc = numpy.identity(n, dtype=numpy.float) + elasticity @ ccc
return (ccc, fcc)
def invdiag(trace):
'''
return numpy.lib.twodim_base.diag(1.0 / trace)
    zero entries in the array are kept as zero instead of being inverted
trace: (array) one dimensional array
return (matrix)
'''
inv_trace = numpy.zeros(len(trace), dtype=numpy.float)
for i in range(len(trace)):
if abs(trace[i]) > 0.0:
inv_trace[i] = 1.0 / trace[i]
return numpy.lib.twodim_base.diag(inv_trace)
def scale_control_coefficients(ccc, fcc, v, x):
# calculate scaled concentration control coefficient
# (scaled CS_ij) = E_j / S_i * (unscaled CS_ij)
ccc = invdiag(x) @ ccc
ccc = ccc @ numpy.lib.twodim_base.diag(v)
# calculate scaled flux control coefficient
# (scaled CJ_ij) = E_j / E_i * (unscaled CJ_ij)
fcc = invdiag(v) @ fcc
fcc = fcc @ numpy.lib.twodim_base.diag(v)
return (ccc, fcc)
def scaled_control_coefficients(stoichiometry, elasticity, fluxes, x):
ccc, fcc = unscaled_control_coefficients(stoichiometry, elasticity)
ccc, fcc = scale_control_coefficients(ccc, fcc, fluxes, x)
return (ccc, fcc)
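# Illustrative usage sketch (not part of the original module). The toy numbers below
# are made up only to show the expected shapes: stoichiometry is metabolites x
# reactions, elasticity is reactions x metabolites, fluxes has one entry per
# reaction and x one entry per metabolite.
def _example_scaled_control_coefficients():
    stoichiometry = numpy.array([[1.0, -1.0, 0.0],
                                 [0.0, 1.0, -1.0]])
    elasticity = numpy.array([[-0.5, 0.0],
                              [0.8, -0.3],
                              [0.0, 0.9]])
    fluxes = numpy.array([1.0, 1.0, 1.0])
    x = numpy.array([2.0, 1.5])
    ccc, fcc = scaled_control_coefficients(stoichiometry, elasticity, fluxes, x)
    return ccc, fcc  # (2 x 3) concentration and (3 x 3) flux control coefficients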
|
ecell/ecell4
|
ecell4/mca/cc.py
|
Python
|
gpl-3.0
| 2,045
|
[
"Gaussian"
] |
25f943a51dc69c14c40ea2a5ba2fa3e5977d1777f549bf518396cfcf527951e6
|
# Writing Functions That Accept Any Number of Arguments
def avg(first, *rest):
return (first + sum(rest)) / (1 + len(rest))
# Sample use
print(avg(1, 2)) # 1.5
print(avg(1, 2, 3, 4)) # 2.5
# Writing Functions That Only Accept Keyword Arguments
def minimum(*values, clip=None):
m = min(values)
if clip is not None:
m = clip if clip > m else m
return m
print(minimum(1, 5, 2, -5, 10)) # Returns -5
print(minimum(1, 5, 2, -5, 10, clip=0)) # Returns 0
# Attaching Informational Metadata to Function Arguments
def add(x:int, y:int) -> int:
return x + y
print(add(1, 2))
# Returning Multiple Values from a Function
def myfun():
return 1, 2, 3
# Defining Functions with Default Arguments
def spam(a, b=42):
print(a, b)
# Defining Anonymous or Inline Functions
add = lambda x, y: x + y
print(add(2,3))
print(add('hello', 'world'))
names = ['David Beazley', 'Brian Jones', 'Raymond Hettinger', 'Ned Batchelder']
print(sorted(names, key=lambda name: name.split()[-1].lower()))
# Capturing Variables in Anonymous Functions
x = 10
a = lambda y: x + y
print(a(10))
# Replacing Single Method Classes with Functions
from urllib.request import urlopen
def urltemplate(template):
def opener(**kwargs):
return urlopen(template.format_map(kwargs))
return opener
#yahoo = urltemplate('http://finance.yahoo.com/d/quotes.csv?s={names}&f={fields}')
#for line in yahoo(names='IBM,AAPL,FB', fields='sl1c1v'):
# print(line.decode('utf-8'))
# Carrying Extra State with Callback Functions
def apply_async(func, args, *, callback):
# Compute the result
result = func(*args)
# Invoke the callback with the result
callback(result)
def print_result(result):
print('Got:', result)
def add(x, y):
return x + y
apply_async(add, (2, 3), callback=print_result)
apply_async(add, ('hello', 'world'), callback=print_result)
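# A sketch of actually carrying state between callback invocations: a small handler
# object keeps a sequence counter across calls. The ResultHandler name is made up
# for this example.
class ResultHandler:
    def __init__(self):
        self.sequence = 0
    def handler(self, result):
        self.sequence += 1
        print('[{}] Got: {}'.format(self.sequence, result))

r = ResultHandler()
apply_async(add, (2, 3), callback=r.handler)               # [1] Got: 5
apply_async(add, ('hello', 'world'), callback=r.handler)   # [2] Got: helloworld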
|
rmzoni/python3-training
|
study/functions.py
|
Python
|
apache-2.0
| 1,877
|
[
"Brian"
] |
bd9cc8f9c723bf28881857f6d9af735dc6475d0f1f5b0d5855785667d748277d
|
# -*- coding: utf-8 -*-
# coding: utf-8
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Generic interfaces to manipulate registration parameter files, including
transform files (to configure warpings)
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import open
import os.path as op
from ... import logging
from ..base import (BaseInterface, BaseInterfaceInputSpec, isdefined,
TraitedSpec, File, traits)
logger = logging.getLogger('interface')
class EditTransformInputSpec(BaseInterfaceInputSpec):
transform_file = File(exists=True, mandatory=True,
desc='transform-parameter file, only 1')
reference_image = File(exists=True,
desc=('set a new reference image to change the '
'target coordinate system.'))
interpolation = traits.Enum('cubic', 'linear', 'nearest', usedefault=True,
argstr='FinalBSplineInterpolationOrder',
desc='set a new interpolator for transformation')
output_type = traits.Enum('float', 'unsigned char', 'unsigned short', 'short',
'unsigned long', 'long', 'double',
argstr='ResultImagePixelType',
desc='set a new output pixel type for resampled images')
output_format = traits.Enum('nii.gz', 'nii', 'mhd', 'hdr', 'vtk',
argstr='ResultImageFormat',
desc='set a new image format for resampled images')
output_file = File(desc='the filename for the resulting transform file')
class EditTransformOutputSpec(TraitedSpec):
output_file = File(exists=True, desc='output transform file')
class EditTransform(BaseInterface):
"""
Manipulates an existing transform file generated with elastix
Example
-------
>>> from nipype.interfaces.elastix import EditTransform
>>> tfm = EditTransform()
>>> tfm.inputs.transform_file = 'TransformParameters.0.txt' # doctest: +SKIP
>>> tfm.inputs.reference_image = 'fixed1.nii' # doctest: +SKIP
>>> tfm.inputs.output_type = 'unsigned char'
>>> tfm.run() # doctest: +SKIP
"""
input_spec = EditTransformInputSpec
output_spec = EditTransformOutputSpec
_out_file = ''
_pattern = '\((?P<entry>%s\s\"?)([-\.\s\w]+)(\"?\))'
_interp = {'nearest': 0, 'linear': 1, 'cubic': 3}
def _run_interface(self, runtime):
import re
import nibabel as nb
import numpy as np
contents = ''
with open(self.inputs.transform_file, 'r') as f:
contents = f.read()
if isdefined(self.inputs.output_type):
p = re.compile((self._pattern % 'ResultImagePixelType').decode('string-escape'))
rep = '(\g<entry>%s\g<3>' % self.inputs.output_type
contents = p.sub(rep, contents)
if isdefined(self.inputs.output_format):
p = re.compile((self._pattern % 'ResultImageFormat').decode('string-escape'))
rep = '(\g<entry>%s\g<3>' % self.inputs.output_format
contents = p.sub(rep, contents)
if isdefined(self.inputs.interpolation):
p = re.compile((self._pattern % 'FinalBSplineInterpolationOrder').decode('string-escape'))
rep = '(\g<entry>%s\g<3>' % self._interp[self.inputs.interpolation]
contents = p.sub(rep, contents)
if isdefined(self.inputs.reference_image):
im = nb.load(self.inputs.reference_image)
if len(im.header.get_zooms()) == 4:
                im = nb.funcs.four_to_three(im)[0]
size = ' '.join(["%01d" % s for s in im.shape])
p = re.compile((self._pattern % 'Size').decode('string-escape'))
rep = '(\g<entry>%s\g<3>' % size
contents = p.sub(rep, contents)
index = ' '.join(["0" for s in im.shape])
p = re.compile((self._pattern % 'Index').decode('string-escape'))
rep = '(\g<entry>%s\g<3>' % index
contents = p.sub(rep, contents)
spacing = ' '.join(["%0.4f" % f for f in im.header.get_zooms()])
p = re.compile((self._pattern % 'Spacing').decode('string-escape'))
rep = '(\g<entry>%s\g<3>' % spacing
contents = p.sub(rep, contents)
            # flip the x and y axes: ITK/elastix use LPS orientation while nibabel affines are RAS
            itkmat = np.eye(4)
itkmat[0, 0] = -1
itkmat[1, 1] = -1
affine = np.dot(itkmat, im.affine)
dirs = ' '.join(['%0.4f' % f for f in affine[0:3, 0:3].reshape(-1)])
orig = ' '.join(['%0.4f' % f for f in affine[0:3, 3].reshape(-1)])
# p = re.compile((self._pattern % 'Direction').decode('string-escape'))
# rep = '(\g<entry>%s\g<3>' % dirs
# contents = p.sub(rep, contents)
p = re.compile((self._pattern % 'Origin').decode('string-escape'))
rep = '(\g<entry>%s\g<3>' % orig
contents = p.sub(rep, contents)
with open(self._get_outfile(), 'w') as of:
of.write(contents)
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['output_file'] = getattr(self, '_out_file')
return outputs
def _get_outfile(self):
val = getattr(self, '_out_file')
if val is not None and val != '':
return val
if isdefined(self.inputs.output_file):
setattr(self, '_out_file', self.inputs.output_file)
return self.inputs.output_file
out_file = op.abspath(op.basename(self.inputs.transform_file))
setattr(self, '_out_file', out_file)
return out_file
|
carolFrohlich/nipype
|
nipype/interfaces/elastix/utils.py
|
Python
|
bsd-3-clause
| 5,816
|
[
"VTK"
] |
1acec2477f38e738762c85aad6ae6a35a4e13896bca8b618e92665612561c29e
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layers.feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import itertools
import os
import sys
import tempfile
# pylint: disable=g-bad-todo
# TODO(#6568): Remove this hack that makes dlopen() not crash.
# pylint: enable=g-bad-todo
# pylint: disable=g-import-not-at-top
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column as fc
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
def _sparse_id_tensor(shape, vocab_size, seed=112123):
  # Returns an arbitrary `SparseTensor` with given shape and vocab size.
np.random.seed(seed)
indices = np.array(list(itertools.product(*[range(s) for s in shape])))
# In order to create some sparsity, we include a value outside the vocab.
values = np.random.randint(0, vocab_size + 1, size=np.prod(shape))
# Remove entries outside the vocabulary.
keep = values < vocab_size
indices = indices[keep]
values = values[keep]
return sparse_tensor_lib.SparseTensor(
indices=indices, values=values, dense_shape=shape)
class FeatureColumnTest(test.TestCase):
def testImmutability(self):
a = fc.sparse_column_with_hash_bucket("aaa", hash_bucket_size=100)
with self.assertRaises(AttributeError):
a.column_name = "bbb"
def testSparseColumnWithHashBucket(self):
a = fc.sparse_column_with_hash_bucket("aaa", hash_bucket_size=100)
self.assertEqual(a.name, "aaa")
self.assertEqual(a.dtype, dtypes.string)
a = fc.sparse_column_with_hash_bucket(
"aaa", hash_bucket_size=100, dtype=dtypes.int64)
self.assertEqual(a.name, "aaa")
self.assertEqual(a.dtype, dtypes.int64)
with self.assertRaisesRegexp(ValueError, "dtype must be string or integer"):
a = fc.sparse_column_with_hash_bucket(
"aaa", hash_bucket_size=100, dtype=dtypes.float32)
def testSparseColumnWithVocabularyFile(self):
b = fc.sparse_column_with_vocabulary_file(
"bbb", vocabulary_file="a_file", vocab_size=454)
self.assertEqual(b.dtype, dtypes.string)
self.assertEqual(b.lookup_config.vocab_size, 454)
self.assertEqual(b.lookup_config.vocabulary_file, "a_file")
with self.assertRaises(ValueError):
# Vocabulary size should be defined if vocabulary_file is used.
fc.sparse_column_with_vocabulary_file("bbb", vocabulary_file="somefile")
b = fc.sparse_column_with_vocabulary_file(
"bbb", vocabulary_file="a_file", vocab_size=454, dtype=dtypes.int64)
self.assertEqual(b.dtype, dtypes.int64)
with self.assertRaisesRegexp(ValueError, "dtype must be string or integer"):
b = fc.sparse_column_with_vocabulary_file(
"bbb", vocabulary_file="a_file", vocab_size=454, dtype=dtypes.float32)
def testWeightedSparseColumn(self):
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted_ids = fc.weighted_sparse_column(ids, "weights")
self.assertEqual(weighted_ids.name, "ids_weighted_by_weights")
def testEmbeddingColumn(self):
a = fc.sparse_column_with_hash_bucket(
"aaa", hash_bucket_size=100, combiner="sum")
b = fc.embedding_column(a, dimension=4, combiner="mean")
self.assertEqual(b.sparse_id_column.name, "aaa")
self.assertEqual(b.dimension, 4)
self.assertEqual(b.combiner, "mean")
def testSharedEmbeddingColumn(self):
a1 = fc.sparse_column_with_keys("a1", ["marlo", "omar", "stringer"])
a2 = fc.sparse_column_with_keys("a2", ["marlo", "omar", "stringer"])
b = fc.shared_embedding_columns([a1, a2], dimension=4, combiner="mean")
self.assertEqual(len(b), 2)
self.assertEqual(b[0].shared_embedding_name, "a1_a2_shared_embedding")
self.assertEqual(b[1].shared_embedding_name, "a1_a2_shared_embedding")
# Create a sparse id tensor for a1.
input_tensor_c1 = sparse_tensor_lib.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2]], values=[0, 1, 2], dense_shape=[3, 3])
# Create a sparse id tensor for a2.
input_tensor_c2 = sparse_tensor_lib.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2]], values=[0, 1, 2], dense_shape=[3, 3])
with variable_scope.variable_scope("run_1"):
b1 = feature_column_ops.input_from_feature_columns({
b[0]: input_tensor_c1
}, [b[0]])
b2 = feature_column_ops.input_from_feature_columns({
b[1]: input_tensor_c2
}, [b[1]])
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
b1_value = b1.eval()
b2_value = b2.eval()
for i in range(len(b1_value)):
self.assertAllClose(b1_value[i], b2_value[i])
    # Test the case when a shared_embedding_name is explicitly specified.
d = fc.shared_embedding_columns(
[a1, a2],
dimension=4,
combiner="mean",
shared_embedding_name="my_shared_embedding")
    # a3 is a completely different sparse column from a1 and a2, but since the
# same shared_embedding_name is passed in, a3 will have the same embedding
# as a1 and a2
a3 = fc.sparse_column_with_keys("a3", [42, 1, -1000], dtype=dtypes.int32)
e = fc.shared_embedding_columns(
[a3],
dimension=4,
combiner="mean",
shared_embedding_name="my_shared_embedding")
with variable_scope.variable_scope("run_2"):
d1 = feature_column_ops.input_from_feature_columns({
d[0]: input_tensor_c1
}, [d[0]])
e1 = feature_column_ops.input_from_feature_columns({
e[0]: input_tensor_c1
}, [e[0]])
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
d1_value = d1.eval()
e1_value = e1.eval()
for i in range(len(d1_value)):
self.assertAllClose(d1_value[i], e1_value[i])
def testSharedEmbeddingColumnDeterminism(self):
# Tests determinism in auto-generated shared_embedding_name.
sparse_id_columns = tuple([
fc.sparse_column_with_keys(k, ["foo", "bar"])
for k in ["07", "02", "00", "03", "05", "01", "09", "06", "04", "08"]
])
output = fc.shared_embedding_columns(
sparse_id_columns, dimension=2, combiner="mean")
self.assertEqual(len(output), 10)
for x in output:
self.assertEqual(x.shared_embedding_name,
"00_01_02_plus_7_others_shared_embedding")
def testSharedEmbeddingColumnErrors(self):
# Tries passing in a string.
with self.assertRaises(TypeError):
invalid_string = "Invalid string."
fc.shared_embedding_columns(invalid_string, dimension=2, combiner="mean")
# Tries passing in a set of sparse columns.
with self.assertRaises(TypeError):
invalid_set = set([
fc.sparse_column_with_keys("a", ["foo", "bar"]),
fc.sparse_column_with_keys("b", ["foo", "bar"]),
])
fc.shared_embedding_columns(invalid_set, dimension=2, combiner="mean")
def testOneHotColumn(self):
a = fc.sparse_column_with_keys("a", ["a", "b", "c", "d"])
onehot_a = fc.one_hot_column(a)
self.assertEqual(onehot_a.sparse_id_column.name, "a")
self.assertEqual(onehot_a.length, 4)
b = fc.sparse_column_with_hash_bucket(
"b", hash_bucket_size=100, combiner="sum")
onehot_b = fc.one_hot_column(b)
self.assertEqual(onehot_b.sparse_id_column.name, "b")
self.assertEqual(onehot_b.length, 100)
def testOneHotReshaping(self):
"""Tests reshaping behavior of `OneHotColumn`."""
id_tensor_shape = [3, 2, 4, 5]
sparse_column = fc.sparse_column_with_keys(
"animals", ["squirrel", "moose", "dragon", "octopus"])
one_hot = fc.one_hot_column(sparse_column)
vocab_size = len(sparse_column.lookup_config.keys)
id_tensor = _sparse_id_tensor(id_tensor_shape, vocab_size)
for output_rank in range(1, len(id_tensor_shape) + 1):
with variable_scope.variable_scope("output_rank_{}".format(output_rank)):
one_hot_output = one_hot._to_dnn_input_layer(
id_tensor, output_rank=output_rank)
with self.test_session() as sess:
one_hot_value = sess.run(one_hot_output)
expected_shape = (id_tensor_shape[:output_rank - 1] + [vocab_size])
self.assertEquals(expected_shape, list(one_hot_value.shape))
def testOneHotColumnForWeightedSparseColumn(self):
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted_ids = fc.weighted_sparse_column(ids, "weights")
one_hot = fc.one_hot_column(weighted_ids)
self.assertEqual(one_hot.sparse_id_column.name, "ids_weighted_by_weights")
self.assertEqual(one_hot.length, 3)
def testRealValuedColumn(self):
a = fc.real_valued_column("aaa")
self.assertEqual(a.name, "aaa")
self.assertEqual(a.dimension, 1)
b = fc.real_valued_column("bbb", 10)
self.assertEqual(b.dimension, 10)
self.assertTrue(b.default_value is None)
c = fc.real_valued_column("ccc", dimension=None)
self.assertIsNone(c.dimension)
self.assertTrue(c.default_value is None)
with self.assertRaisesRegexp(TypeError, "dimension must be an integer"):
fc.real_valued_column("d3", dimension=1.0)
with self.assertRaisesRegexp(ValueError,
"dimension must be greater than 0"):
fc.real_valued_column("d3", dimension=0)
with self.assertRaisesRegexp(ValueError,
"dtype must be convertible to float"):
fc.real_valued_column("d3", dtype=dtypes.string)
# default_value is an integer.
c1 = fc.real_valued_column("c1", default_value=2)
self.assertListEqual(list(c1.default_value), [2.])
c2 = fc.real_valued_column("c2", default_value=2, dtype=dtypes.int32)
self.assertListEqual(list(c2.default_value), [2])
c3 = fc.real_valued_column("c3", dimension=4, default_value=2)
self.assertListEqual(list(c3.default_value), [2, 2, 2, 2])
c4 = fc.real_valued_column(
"c4", dimension=4, default_value=2, dtype=dtypes.int32)
self.assertListEqual(list(c4.default_value), [2, 2, 2, 2])
c5 = fc.real_valued_column("c5", dimension=None, default_value=2)
self.assertListEqual(list(c5.default_value), [2])
# default_value is a float.
d1 = fc.real_valued_column("d1", default_value=2.)
self.assertListEqual(list(d1.default_value), [2.])
d2 = fc.real_valued_column("d2", dimension=4, default_value=2.)
self.assertListEqual(list(d2.default_value), [2., 2., 2., 2.])
with self.assertRaisesRegexp(TypeError,
"default_value must be compatible with dtype"):
fc.real_valued_column("d3", default_value=2., dtype=dtypes.int32)
d4 = fc.real_valued_column("d4", dimension=None, default_value=2.)
self.assertListEqual(list(d4.default_value), [2.])
# default_value is neither integer nor float.
with self.assertRaisesRegexp(TypeError,
"default_value must be compatible with dtype"):
fc.real_valued_column("e1", default_value="string")
with self.assertRaisesRegexp(TypeError,
"default_value must be compatible with dtype"):
fc.real_valued_column("e1", dimension=3, default_value=[1, 3., "string"])
# default_value is a list of integers.
f1 = fc.real_valued_column("f1", default_value=[2])
self.assertListEqual(list(f1.default_value), [2])
f2 = fc.real_valued_column("f2", dimension=3, default_value=[2, 2, 2])
self.assertListEqual(list(f2.default_value), [2., 2., 2.])
f3 = fc.real_valued_column(
"f3", dimension=3, default_value=[2, 2, 2], dtype=dtypes.int32)
self.assertListEqual(list(f3.default_value), [2, 2, 2])
# default_value is a list of floats.
g1 = fc.real_valued_column("g1", default_value=[2.])
self.assertListEqual(list(g1.default_value), [2.])
g2 = fc.real_valued_column("g2", dimension=3, default_value=[2., 2, 2])
self.assertListEqual(list(g2.default_value), [2., 2., 2.])
with self.assertRaisesRegexp(TypeError,
"default_value must be compatible with dtype"):
fc.real_valued_column("g3", default_value=[2.], dtype=dtypes.int32)
with self.assertRaisesRegexp(
ValueError, "The length of default_value must be equal to dimension"):
fc.real_valued_column("g4", dimension=3, default_value=[2.])
# Default value is a list but dimension is None.
with self.assertRaisesRegexp(ValueError,
"Only scalar default value is supported "
"when dimension is None"):
fc.real_valued_column("g5", dimension=None, default_value=[2., 3.])
# Test that the normalizer_fn gets stored for a real_valued_column
normalizer = lambda x: x - 1
h1 = fc.real_valued_column("h1", normalizer=normalizer)
self.assertEqual(normalizer(10), h1.normalizer_fn(10))
# Test that normalizer is not stored within key
self.assertFalse("normalizer" in g1.key)
self.assertFalse("normalizer" in g2.key)
self.assertFalse("normalizer" in h1.key)
def testRealValuedColumnReshaping(self):
"""Tests reshaping behavior of `RealValuedColumn`."""
batch_size = 4
sequence_length = 8
dimensions = [3, 4, 5]
np.random.seed(2222)
input_shape = [batch_size, sequence_length] + dimensions
real_valued_input = np.random.rand(*input_shape)
real_valued_column = fc.real_valued_column("values")
for output_rank in range(1, 3 + len(dimensions)):
with variable_scope.variable_scope("output_rank_{}".format(output_rank)):
real_valued_output = real_valued_column._to_dnn_input_layer(
constant_op.constant(
real_valued_input, dtype=dtypes.float32),
output_rank=output_rank)
with self.test_session() as sess:
real_valued_eval = sess.run(real_valued_output)
expected_shape = (input_shape[:output_rank - 1] +
[np.prod(input_shape[output_rank - 1:])])
self.assertEquals(expected_shape, list(real_valued_eval.shape))
def testRealValuedColumnDensification(self):
"""Tests densification behavior of `RealValuedColumn`."""
# No default value, dimension 1 float.
real_valued_column = fc.real_valued_column(
"sparse_real_valued1", dimension=None)
sparse_tensor = sparse_tensor_lib.SparseTensor(
values=[2.0, 5.0], indices=[[0, 0], [2, 0]], dense_shape=[3, 1])
densified_output = real_valued_column._to_dnn_input_layer(sparse_tensor)
# With default value, dimension 2 int.
real_valued_column_with_default = fc.real_valued_column(
"sparse_real_valued2",
dimension=None,
default_value=-1,
dtype=dtypes.int32)
sparse_tensor2 = sparse_tensor_lib.SparseTensor(
values=[2, 5, 9, 0],
indices=[[0, 0], [1, 1], [2, 0], [2, 1]],
dense_shape=[3, 2])
densified_output2 = real_valued_column_with_default._to_dnn_input_layer(
sparse_tensor2)
with self.test_session() as sess:
densified_output_eval, densified_output_eval2 = sess.run(
[densified_output, densified_output2])
self.assertAllEqual(densified_output_eval, [[2.0], [0.0], [5.0]])
self.assertAllEqual(densified_output_eval2, [[2, -1], [-1, 5], [9, 0]])
def testBucketizedColumnNameEndsWithUnderscoreBucketized(self):
a = fc.bucketized_column(fc.real_valued_column("aaa"), [0, 4])
self.assertEqual(a.name, "aaa_bucketized")
def testBucketizedColumnRequiresRealValuedColumn(self):
with self.assertRaisesRegexp(
TypeError, "source_column must be an instance of _RealValuedColumn"):
fc.bucketized_column("bbb", [0])
with self.assertRaisesRegexp(
TypeError, "source_column must be an instance of _RealValuedColumn"):
fc.bucketized_column(
fc.sparse_column_with_integerized_feature(
column_name="bbb", bucket_size=10), [0])
def testBucketizedColumnRequiresRealValuedColumnDimension(self):
with self.assertRaisesRegexp(ValueError,
"source_column must have a defined dimension"):
fc.bucketized_column(fc.real_valued_column("bbb", dimension=None), [0])
def testBucketizedColumnRequiresSortedBuckets(self):
with self.assertRaisesRegexp(ValueError,
"boundaries must be a sorted list"):
fc.bucketized_column(fc.real_valued_column("ccc"), [5, 0, 4])
def testBucketizedColumnWithSameBucketBoundaries(self):
a_bucketized = fc.bucketized_column(
fc.real_valued_column("a"), [1., 2., 2., 3., 3.])
self.assertEqual(a_bucketized.name, "a_bucketized")
self.assertTupleEqual(a_bucketized.boundaries, (1., 2., 3.))
def testBucketizedColumnDeepCopy(self):
"""Tests that we can do a deepcopy of a bucketized column.
This test requires that the bucketized column also accept boundaries
as tuples.
"""
bucketized = fc.bucketized_column(
fc.real_valued_column("a"), [1., 2., 2., 3., 3.])
self.assertEqual(bucketized.name, "a_bucketized")
self.assertTupleEqual(bucketized.boundaries, (1., 2., 3.))
bucketized_copy = copy.deepcopy(bucketized)
self.assertEqual(bucketized_copy.name, "a_bucketized")
self.assertTupleEqual(bucketized_copy.boundaries, (1., 2., 3.))
def testCrossedColumnNameCreatesSortedNames(self):
a = fc.sparse_column_with_hash_bucket("aaa", hash_bucket_size=100)
b = fc.sparse_column_with_hash_bucket("bbb", hash_bucket_size=100)
bucket = fc.bucketized_column(fc.real_valued_column("cost"), [0, 4])
crossed = fc.crossed_column(set([b, bucket, a]), hash_bucket_size=10000)
self.assertEqual("aaa_X_bbb_X_cost_bucketized", crossed.name,
"name should be generated by sorted column names")
self.assertEqual("aaa", crossed.columns[0].name)
self.assertEqual("bbb", crossed.columns[1].name)
self.assertEqual("cost_bucketized", crossed.columns[2].name)
def testCrossedColumnNotSupportRealValuedColumn(self):
b = fc.sparse_column_with_hash_bucket("bbb", hash_bucket_size=100)
with self.assertRaisesRegexp(
TypeError, "columns must be a set of _SparseColumn, _CrossedColumn, "
"or _BucketizedColumn instances"):
fc.crossed_column(
set([b, fc.real_valued_column("real")]), hash_bucket_size=10000)
def testFloat32WeightedSparseInt32ColumnDtypes(self):
ids = fc.sparse_column_with_keys("ids", [42, 1, -1000], dtype=dtypes.int32)
weighted_ids = fc.weighted_sparse_column(ids, "weights")
self.assertDictEqual({
"ids": parsing_ops.VarLenFeature(dtypes.int32),
"weights": parsing_ops.VarLenFeature(dtypes.float32)
}, weighted_ids.config)
def testFloat32WeightedSparseStringColumnDtypes(self):
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted_ids = fc.weighted_sparse_column(ids, "weights")
self.assertDictEqual({
"ids": parsing_ops.VarLenFeature(dtypes.string),
"weights": parsing_ops.VarLenFeature(dtypes.float32)
}, weighted_ids.config)
def testInt32WeightedSparseStringColumnDtypes(self):
ids = fc.sparse_column_with_keys("ids", ["marlo", "omar", "stringer"])
weighted_ids = fc.weighted_sparse_column(ids, "weights", dtype=dtypes.int32)
self.assertDictEqual({
"ids": parsing_ops.VarLenFeature(dtypes.string),
"weights": parsing_ops.VarLenFeature(dtypes.int32)
}, weighted_ids.config)
with self.assertRaisesRegexp(ValueError,
"dtype is not convertible to float"):
weighted_ids = fc.weighted_sparse_column(
ids, "weights", dtype=dtypes.string)
def testInt32WeightedSparseInt64ColumnDtypes(self):
ids = fc.sparse_column_with_keys("ids", [42, 1, -1000], dtype=dtypes.int64)
weighted_ids = fc.weighted_sparse_column(ids, "weights", dtype=dtypes.int32)
self.assertDictEqual({
"ids": parsing_ops.VarLenFeature(dtypes.int64),
"weights": parsing_ops.VarLenFeature(dtypes.int32)
}, weighted_ids.config)
with self.assertRaisesRegexp(ValueError,
"dtype is not convertible to float"):
weighted_ids = fc.weighted_sparse_column(
ids, "weights", dtype=dtypes.string)
def testRealValuedColumnDtypes(self):
rvc = fc.real_valued_column("rvc")
self.assertDictEqual(
{
"rvc": parsing_ops.FixedLenFeature(
[1], dtype=dtypes.float32)
},
rvc.config)
rvc = fc.real_valued_column("rvc", dimension=None)
self.assertDictEqual(
{
"rvc": parsing_ops.VarLenFeature(dtype=dtypes.float32)
}, rvc.config)
rvc = fc.real_valued_column("rvc", dtype=dtypes.int32)
self.assertDictEqual(
{
"rvc": parsing_ops.FixedLenFeature(
[1], dtype=dtypes.int32)
},
rvc.config)
rvc = fc.real_valued_column("rvc", dimension=None, dtype=dtypes.int32)
self.assertDictEqual(
{
"rvc": parsing_ops.VarLenFeature(dtype=dtypes.int32)
}, rvc.config)
with self.assertRaisesRegexp(ValueError,
"dtype must be convertible to float"):
fc.real_valued_column("rvc", dtype=dtypes.string)
with self.assertRaisesRegexp(ValueError,
"dtype must be convertible to float"):
fc.real_valued_column("rvc", dimension=None, dtype=dtypes.string)
def testSparseColumnDtypes(self):
sc = fc.sparse_column_with_integerized_feature("sc", 10)
self.assertDictEqual(
{
"sc": parsing_ops.VarLenFeature(dtype=dtypes.int64)
}, sc.config)
sc = fc.sparse_column_with_integerized_feature("sc", 10, dtype=dtypes.int32)
self.assertDictEqual(
{
"sc": parsing_ops.VarLenFeature(dtype=dtypes.int32)
}, sc.config)
with self.assertRaisesRegexp(ValueError, "dtype must be an integer"):
fc.sparse_column_with_integerized_feature("sc", 10, dtype=dtypes.float32)
def testSparseColumnSingleBucket(self):
sc = fc.sparse_column_with_integerized_feature("sc", 1)
self.assertDictEqual(
{
"sc": parsing_ops.VarLenFeature(dtype=dtypes.int64)
}, sc.config)
self.assertEqual(1, sc._wide_embedding_lookup_arguments(None).vocab_size)
def testSparseColumnAcceptsDenseScalar(self):
"""Tests that `SparseColumn`s accept dense scalar inputs."""
batch_size = 4
dense_scalar_input = [1, 2, 3, 4]
sparse_column = fc.sparse_column_with_integerized_feature("values", 10)
features = {"values":
constant_op.constant(dense_scalar_input, dtype=dtypes.int64)}
sparse_column.insert_transformed_feature(features)
sparse_output = features[sparse_column]
expected_shape = [batch_size, 1]
with self.test_session() as sess:
sparse_result = sess.run(sparse_output)
self.assertEquals(expected_shape, list(sparse_result.dense_shape))
def testCreateFeatureSpec(self):
sparse_col = fc.sparse_column_with_hash_bucket(
"sparse_column", hash_bucket_size=100)
embedding_col = fc.embedding_column(
fc.sparse_column_with_hash_bucket(
"sparse_column_for_embedding", hash_bucket_size=10),
dimension=4)
str_sparse_id_col = fc.sparse_column_with_keys(
"str_id_column", ["marlo", "omar", "stringer"])
int32_sparse_id_col = fc.sparse_column_with_keys(
"int32_id_column", [42, 1, -1000], dtype=dtypes.int32)
int64_sparse_id_col = fc.sparse_column_with_keys(
"int64_id_column", [42, 1, -1000], dtype=dtypes.int64)
weighted_id_col = fc.weighted_sparse_column(str_sparse_id_col,
"str_id_weights_column")
real_valued_col1 = fc.real_valued_column("real_valued_column1")
real_valued_col2 = fc.real_valued_column("real_valued_column2", 5)
real_valued_col3 = fc.real_valued_column(
"real_valued_column3", dimension=None)
bucketized_col1 = fc.bucketized_column(
fc.real_valued_column("real_valued_column_for_bucketization1"), [0, 4])
bucketized_col2 = fc.bucketized_column(
fc.real_valued_column("real_valued_column_for_bucketization2", 4),
[0, 4])
a = fc.sparse_column_with_hash_bucket("cross_aaa", hash_bucket_size=100)
b = fc.sparse_column_with_hash_bucket("cross_bbb", hash_bucket_size=100)
cross_col = fc.crossed_column(set([a, b]), hash_bucket_size=10000)
feature_columns = set([
sparse_col, embedding_col, weighted_id_col, int32_sparse_id_col,
int64_sparse_id_col, real_valued_col1, real_valued_col2,
real_valued_col3, bucketized_col1, bucketized_col2, cross_col
])
expected_config = {
"sparse_column":
parsing_ops.VarLenFeature(dtypes.string),
"sparse_column_for_embedding":
parsing_ops.VarLenFeature(dtypes.string),
"str_id_column":
parsing_ops.VarLenFeature(dtypes.string),
"int32_id_column":
parsing_ops.VarLenFeature(dtypes.int32),
"int64_id_column":
parsing_ops.VarLenFeature(dtypes.int64),
"str_id_weights_column":
parsing_ops.VarLenFeature(dtypes.float32),
"real_valued_column1":
parsing_ops.FixedLenFeature(
[1], dtype=dtypes.float32),
"real_valued_column2":
parsing_ops.FixedLenFeature(
[5], dtype=dtypes.float32),
"real_valued_column3":
parsing_ops.VarLenFeature(dtype=dtypes.float32),
"real_valued_column_for_bucketization1":
parsing_ops.FixedLenFeature(
[1], dtype=dtypes.float32),
"real_valued_column_for_bucketization2":
parsing_ops.FixedLenFeature(
[4], dtype=dtypes.float32),
"cross_aaa":
parsing_ops.VarLenFeature(dtypes.string),
"cross_bbb":
parsing_ops.VarLenFeature(dtypes.string)
}
config = fc.create_feature_spec_for_parsing(feature_columns)
self.assertDictEqual(expected_config, config)
# Test that the same config is parsed out if we pass a dictionary.
feature_columns_dict = {
str(i): val
for i, val in enumerate(feature_columns)
}
config = fc.create_feature_spec_for_parsing(feature_columns_dict)
self.assertDictEqual(expected_config, config)
def testCreateFeatureSpec_RealValuedColumnWithDefaultValue(self):
real_valued_col1 = fc.real_valued_column(
"real_valued_column1", default_value=2)
real_valued_col2 = fc.real_valued_column(
"real_valued_column2", 5, default_value=4)
real_valued_col3 = fc.real_valued_column(
"real_valued_column3", default_value=[8])
real_valued_col4 = fc.real_valued_column(
"real_valued_column4", 3, default_value=[1, 0, 6])
real_valued_col5 = fc.real_valued_column(
"real_valued_column5", dimension=None, default_value=2)
feature_columns = [
real_valued_col1, real_valued_col2, real_valued_col3, real_valued_col4,
real_valued_col5
]
config = fc.create_feature_spec_for_parsing(feature_columns)
self.assertEqual(5, len(config))
self.assertDictEqual(
{
"real_valued_column1":
parsing_ops.FixedLenFeature(
[1], dtype=dtypes.float32, default_value=[2.]),
"real_valued_column2":
parsing_ops.FixedLenFeature(
[5],
dtype=dtypes.float32,
default_value=[4., 4., 4., 4., 4.]),
"real_valued_column3":
parsing_ops.FixedLenFeature(
[1], dtype=dtypes.float32, default_value=[8.]),
"real_valued_column4":
parsing_ops.FixedLenFeature(
[3], dtype=dtypes.float32, default_value=[1., 0., 6.]),
"real_valued_column5":
parsing_ops.VarLenFeature(dtype=dtypes.float32)
},
config)
def testCreateSequenceFeatureSpec(self):
sparse_col = fc.sparse_column_with_hash_bucket(
"sparse_column", hash_bucket_size=100)
embedding_col = fc.embedding_column(
fc.sparse_column_with_hash_bucket(
"sparse_column_for_embedding", hash_bucket_size=10),
dimension=4)
sparse_id_col = fc.sparse_column_with_keys("id_column",
["marlo", "omar", "stringer"])
weighted_id_col = fc.weighted_sparse_column(sparse_id_col,
"id_weights_column")
real_valued_col1 = fc.real_valued_column("real_valued_column", dimension=2)
real_valued_col2 = fc.real_valued_column(
"real_valued_default_column", dimension=5, default_value=3.0)
real_valued_col3 = fc.real_valued_column(
"real_valued_var_len_column", dimension=None, default_value=3.0)
feature_columns = set([
sparse_col, embedding_col, weighted_id_col, real_valued_col1,
real_valued_col2, real_valued_col3
])
feature_spec = fc._create_sequence_feature_spec_for_parsing(feature_columns)
expected_feature_spec = {
"sparse_column":
parsing_ops.VarLenFeature(dtypes.string),
"sparse_column_for_embedding":
parsing_ops.VarLenFeature(dtypes.string),
"id_column":
parsing_ops.VarLenFeature(dtypes.string),
"id_weights_column":
parsing_ops.VarLenFeature(dtypes.float32),
"real_valued_column":
parsing_ops.FixedLenSequenceFeature(
shape=[2], dtype=dtypes.float32, allow_missing=False),
"real_valued_default_column":
parsing_ops.FixedLenSequenceFeature(
shape=[5], dtype=dtypes.float32, allow_missing=True),
"real_valued_var_len_column":
parsing_ops.VarLenFeature(dtype=dtypes.float32)
}
self.assertDictEqual(expected_feature_spec, feature_spec)
def testMakePlaceHolderTensorsForBaseFeatures(self):
sparse_col = fc.sparse_column_with_hash_bucket(
"sparse_column", hash_bucket_size=100)
real_valued_col = fc.real_valued_column("real_valued_column", 5)
vlen_real_valued_col = fc.real_valued_column(
"vlen_real_valued_column", dimension=None)
bucketized_col = fc.bucketized_column(
fc.real_valued_column("real_valued_column_for_bucketization"), [0, 4])
feature_columns = set(
[sparse_col, real_valued_col, vlen_real_valued_col, bucketized_col])
placeholders = (
fc.make_place_holder_tensors_for_base_features(feature_columns))
self.assertEqual(4, len(placeholders))
self.assertTrue(
isinstance(placeholders["sparse_column"],
sparse_tensor_lib.SparseTensor))
self.assertTrue(
isinstance(placeholders["vlen_real_valued_column"],
sparse_tensor_lib.SparseTensor))
placeholder = placeholders["real_valued_column"]
self.assertGreaterEqual(
placeholder.name.find(u"Placeholder_real_valued_column"), 0)
self.assertEqual(dtypes.float32, placeholder.dtype)
self.assertEqual([None, 5], placeholder.get_shape().as_list())
placeholder = placeholders["real_valued_column_for_bucketization"]
self.assertGreaterEqual(
placeholder.name.find(
u"Placeholder_real_valued_column_for_bucketization"), 0)
self.assertEqual(dtypes.float32, placeholder.dtype)
self.assertEqual([None, 1], placeholder.get_shape().as_list())
def testInitEmbeddingColumnWeightsFromCkpt(self):
sparse_col = fc.sparse_column_with_hash_bucket(
column_name="object_in_image", hash_bucket_size=4)
# Create _EmbeddingColumn which randomly initializes embedding of size
# [4, 16].
embedding_col = fc.embedding_column(sparse_col, dimension=16)
# Creating a SparseTensor which has all the ids possible for the given
# vocab.
input_tensor = sparse_tensor_lib.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2], [3, 3]],
values=[0, 1, 2, 3],
dense_shape=[4, 4])
# Invoking 'layers.input_from_feature_columns' will create the embedding
# variable. Creating under scope 'run_1' so as to prevent name conflicts
# when creating embedding variable for 'embedding_column_pretrained'.
with variable_scope.variable_scope("run_1"):
with variable_scope.variable_scope(embedding_col.name):
# This will return a [4, 16] tensor which is same as embedding variable.
embeddings = feature_column_ops.input_from_feature_columns({
embedding_col: input_tensor
}, [embedding_col])
save = saver.Saver()
ckpt_dir_prefix = os.path.join(self.get_temp_dir(),
"init_embedding_col_w_from_ckpt")
ckpt_dir = tempfile.mkdtemp(prefix=ckpt_dir_prefix)
checkpoint_path = os.path.join(ckpt_dir, "model.ckpt")
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
saved_embedding = embeddings.eval()
save.save(sess, checkpoint_path)
embedding_col_initialized = fc.embedding_column(
sparse_id_column=sparse_col,
dimension=16,
ckpt_to_load_from=checkpoint_path,
tensor_name_in_ckpt=("run_1/object_in_image_embedding/"
"input_from_feature_columns/object"
"_in_image_embedding/weights"))
with variable_scope.variable_scope("run_2"):
# This will initialize the embedding from provided checkpoint and return a
# [4, 16] tensor which is same as embedding variable. Since we didn't
# modify embeddings, this should be same as 'saved_embedding'.
pretrained_embeddings = feature_column_ops.input_from_feature_columns({
embedding_col_initialized: input_tensor
}, [embedding_col_initialized])
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
loaded_embedding = pretrained_embeddings.eval()
self.assertAllClose(saved_embedding, loaded_embedding)
def testInitCrossedColumnWeightsFromCkpt(self):
sparse_col_1 = fc.sparse_column_with_hash_bucket(
column_name="col_1", hash_bucket_size=4)
sparse_col_2 = fc.sparse_column_with_keys(
column_name="col_2", keys=("foo", "bar", "baz"))
sparse_col_3 = fc.sparse_column_with_keys(
column_name="col_3", keys=(42, 1, -1000), dtype=dtypes.int64)
crossed_col = fc.crossed_column(
columns=[sparse_col_1, sparse_col_2, sparse_col_3], hash_bucket_size=4)
input_tensor = sparse_tensor_lib.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2], [3, 3]],
values=[0, 1, 2, 3],
dense_shape=[4, 4])
# Invoking 'weighted_sum_from_feature_columns' will create the crossed
# column weights variable.
with variable_scope.variable_scope("run_1"):
with variable_scope.variable_scope(crossed_col.name):
# Returns looked up column weights which is same as crossed column
# weights as well as actual references to weights variables.
_, col_weights, _ = (
feature_column_ops.weighted_sum_from_feature_columns({
sparse_col_1.name: input_tensor,
sparse_col_2.name: input_tensor,
sparse_col_3.name: input_tensor
}, [crossed_col], 1))
# Update the weights since default initializer initializes all weights
# to 0.0.
for weight in col_weights.values():
assign_op = state_ops.assign(weight[0], weight[0] + 0.5)
save = saver.Saver()
ckpt_dir_prefix = os.path.join(self.get_temp_dir(),
"init_crossed_col_w_from_ckpt")
ckpt_dir = tempfile.mkdtemp(prefix=ckpt_dir_prefix)
checkpoint_path = os.path.join(ckpt_dir, "model.ckpt")
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
sess.run(assign_op)
saved_col_weights = col_weights[crossed_col][0].eval()
save.save(sess, checkpoint_path)
crossed_col_initialized = fc.crossed_column(
columns=[sparse_col_1, sparse_col_2],
hash_bucket_size=4,
ckpt_to_load_from=checkpoint_path,
tensor_name_in_ckpt=("run_1/col_1_X_col_2_X_col_3/"
"weighted_sum_from_feature_columns/"
"col_1_X_col_2_X_col_3/weights"))
with variable_scope.variable_scope("run_2"):
# This will initialize the crossed column weights from provided checkpoint
# and return a [4, 1] tensor which is same as weights variable. Since we
# won't modify weights, this should be same as 'saved_col_weights'.
_, col_weights, _ = (feature_column_ops.weighted_sum_from_feature_columns(
{
sparse_col_1.name: input_tensor,
sparse_col_2.name: input_tensor
}, [crossed_col_initialized], 1))
col_weights_from_ckpt = col_weights[crossed_col_initialized][0]
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
loaded_col_weights = col_weights_from_ckpt.eval()
self.assertAllClose(saved_col_weights, loaded_col_weights)
if __name__ == "__main__":
test.main()
|
sugartom/tensorflow-alien
|
tensorflow/contrib/layers/python/layers/feature_column_test.py
|
Python
|
apache-2.0
| 38,569
|
[
"MOOSE",
"Octopus"
] |
c36c2a64d5b77c9e725f820e1606820c3e9bfb1e12e6ac2773ab06caef8da3dc
|
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('show_data_table')
@click.argument("data_table_id", type=str)
@pass_context
@custom_exception
@json_output
def cli(ctx, data_table_id):
"""Get details of a given data table.
Output:
A description of the given data table and its content.
For example::
{'columns': ['value', 'dbkey', 'name', 'path'],
'fields': [['test id',
'test',
'test name',
'/opt/galaxy-dist/tool-data/test/seq/test id.fa']],
'model_class': 'TabularToolDataTable',
'name': 'all_fasta'}
"""
return ctx.gi.tool_data.show_data_table(data_table_id)
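# A minimal usage sketch (assumes a configured parsec profile pointing at a Galaxy
# instance; 'all_fasta' is the table named in the docstring example above):
#
#   parsec tool_data show_data_table all_fasta
#
# which should print the JSON description of the table to stdout.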
|
galaxy-iuc/parsec
|
parsec/commands/tool_data/show_data_table.py
|
Python
|
apache-2.0
| 807
|
[
"Galaxy"
] |
2532fe21f8911301706f8732e7ab402828912b26fa720e6e4263ca97dd4c3bbf
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@brief: Import netcdf temperature data in GRASS GIS
This program is free software under the GNU General Public License
(>=v2). Read the file COPYING that comes with GRASS for details.
@author: Brendan Harmon (brendanharmon@gmail.com)
"""
import os
import sys
import csv
import atexit
import datetime
from dateutil.relativedelta import relativedelta
import grass.script as gscript
from grass.exceptions import CalledModuleError
# temporary region
gscript.use_temp_region()
# set environment
env = gscript.gisenv()
overwrite = True
env['GRASS_OVERWRITE'] = overwrite
env['GRASS_VERBOSE'] = False
env['GRASS_MESSAGE_FORMAT'] = 'standard'
gisdbase = env['GISDBASE']
location = env['LOCATION_NAME']
mapset = env['MAPSET']
# set path
temperature = os.path.join(gisdbase, 'climate_data', 'precip.mon.mean.nc')
# set temporal parameters
start_year = 1998
start_month = 1
end_year = 2016
end_month = 13
time = datetime.date(start_year, start_month, 1)
# set region
gscript.run_command('g.region',
n=10,
s=8,
e=-78,
w=-80,
res=0.3)
# create list
mean_temperature = []
# csv filepath
temperature_stats = os.path.join(gisdbase, 'temperature_stats.csv')
# write statistics to csv file
with open(temperature_stats, 'wb') as csvfile:
stats_writer = csv.writer(csvfile,
delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL)
# write headers
stats_writer.writerow(['Time',
'Temperature(degC)'])
# process temperature rasters
i = 0
for year in range(start_year, end_year, 1):
for month in range(start_month, end_month, 1):
try:
# set map name variables
old = 'temperature_{year}_{month}@{mapset}'.format(year=time.year,
month=time.strftime('%m'),
mapset='temperature')
new = 'temperature_{year}_{month}'.format(year=time.year,
month=time.strftime('%m'),
mapset=mapset)
# import to mapset, crop map to region, and divide by ten
# since integer versions of temperature grids
# are stored in tenths of degrees C
gscript.run_command('r.mapcalc',
expression='{new} = {old}*0.1'.format(old=old,
new=new),
overwrite=overwrite)
# compute statistics
univar = gscript.parse_command('r.univar',
map=new,
separator='newline',
flags='g')
mean_temperature.append(univar['mean'])
# write data
stats_writer.writerow([time,
mean_temperature[i]])
# advance
i = i + 1
time = time + relativedelta(months=+1)
except:
pass
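# A hedged sketch (not part of the original workflow) of how one monthly band of the
# netCDF file referenced by 'temperature' above might be brought into GRASS via GDAL;
# the band-to-month mapping and the output raster name are assumptions:
#
#   gscript.run_command('r.in.gdal',
#                       input=temperature,
#                       band=1,
#                       output='temperature_1998_01',
#                       overwrite=overwrite)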
|
baharmon/panama_hydrological_modeling
|
utilities/temperature_stats.py
|
Python
|
gpl-2.0
| 2,943
|
[
"NetCDF"
] |
82bd00712a2ac3b5c43f56e35854391acd61878fd904a789fa6c82ac54379ab2
|
#!/usr/bin/env python
__author__ = "Mike McCann"
__copyright__ = "Copyright 2011, MBARI"
__credits__ = ["Chander Ganesan, Open Technology Group"]
__license__ = "GPL"
__version__ = "$Revision: 1.1 $".split()[1]
__maintainer__ = "Mike McCann"
__email__ = "mccann at mbari.org"
__status__ = "Development"
'''
The DAPloaders module contains classes for reading data from OPeNDAP servers and
loading into the STOQS database. It assumes that all data are on the 4 spatial-
temporal dimensions as defined in the COARDS/CF convention. There are custom
derived classes here that understand Mooring (Station and StationProfile), AUV
and Glider (Trajectory) CDM Data Types.
Mike McCann
MBARI Dec 29, 2011
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@author: __author__
@status: __status__
@license: __license__
'''
# Force lookup of models to THE specific stoqs module.
import os
import re
import sys
from argparse import Namespace
from django.contrib.gis.geos import LineString, Point
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../")) # config is one dir up
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'config.settings.local'
from django.conf import settings
from django.db.models import Max
from django.db.utils import IntegrityError, DatabaseError
from django.db import transaction
from jdcal import gcal2jd, jd2gcal
from stoqs.models import (Activity, InstantPoint, Measurement, MeasuredParameter,
NominalLocation, Resource, ResourceType, ActivityResource,
Parameter)
from datetime import datetime, timedelta
from psycopg2.errors import UniqueViolation
import pytz
from pydap.client import open_url
import pydap.model
import math
from coards import to_udunits, from_udunits, ParserError
import logging
import socket
import seawater.eos80 as sw
from utils.utils import mode, simplify_points
from loaders import (STOQS_Loader, SkipRecord, HasMeasurement, MEASUREDINSITU, FileNotFound,
SIGMAT, SPICE, SPICINESS, ALTITUDE)
from loaders.SampleLoaders import get_closest_instantpoint, ClosestTimeNotFoundException
import numpy as np
import psycopg2
from collections import defaultdict
# Set up logging
logger = logging.getLogger(__name__)
# Logging level set in stoqs/config/common.py or via command line from LoadScript(), but may override here
##logger.setLevel(logging.INFO)
# When settings.DEBUG is True Django will fill up a hash with stats on every insert done to the database.
# "Monkey patch" the CursorWrapper to prevent this. Otherwise we can't load large amounts of data.
# See http://stackoverflow.com/questions/7768027/turn-off-sql-logging-while-keeping-settings-debug
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.utils import CursorWrapper
TRAJECTORY = 'trajectory'
TIMESERIES = 'timeseries'
TIMESERIESPROFILE = 'timeseriesprofile'
TRAJECTORYPROFILE = 'trajectoryprofile'
TIME = 'time'
DEPTH = 'depth'
LATITUDE = 'latitude'
LONGITUDE = 'longitude'
# Set batch_size such that we avoid swapping with bulk_create() on a 3 GB RAM system, a value = 10000 is good
# Significant swap disk is used (12%) and loads of DEIMOS data take 20% longer with BATCH_SIZE=100000
# Some loads (e.g. stoqs_canon_october2020) will crash postgresql unless BATCH_SIZE is reduced to 1000
# Update on 6 March 2020:
# A more raw version of the DEIMOS data with 2619 depths in each profile runs out of memory unless it's
# run on a VM with more than 10 GB of RAM. Reducing BATCH_SIZE to 4 helps some with the memory requirement
# but will still crash (be killed) on a 3 GB VM and will take twice the time on a bigger VM.
# TODO: Load these data as trajectoryProfile with point simplification (removal of redundant data points).
BATCH_SIZE=10000
if settings.DEBUG:
BaseDatabaseWrapper.make_debug_cursor = lambda self, cursor: CursorWrapper(cursor, self)
class ParameterNotFound(Exception):
pass
class NoValidData(Exception):
pass
class AuxCoordMissingStandardName(Exception):
pass
class VariableMissingCoordinatesAttribute(Exception):
pass
class VariableHasBadCoordinatesAttribute(Exception):
pass
class InvalidSliceRequest(Exception):
pass
class OpendapError(Exception):
pass
class DuplicateData(Exception):
pass
class CoordNotEqual(Exception):
pass
class Base_Loader(STOQS_Loader):
'''
A base class for data load operations. This shouldn't be instantiated directly,
instead a loader for a particular platform should inherit from it. Since
each platform could have its own parameters, etc. each platform (at a minimum)
should declare the overridden names, ignored names, etc..
The time bounds of an Activities can be specified in two ways:
1. By specifying startDatetime and endDatetime. This is handy for extracting a subset
of data from an OPeNDAP data source, e.g. aggregated Mooring data, to populate a
campaign specific database
2. By setting startDatetime and endDatetime to None, in which case the start and end
times are defined by the start and end of the data in the specified url
A third time parameter (dataStartDatetime) can be specified. This is used for when
data is to be appended to an existing activity, such as for the realtime tethys loads
as done by the monitorLrauv.py script in the realtime folder. This
use has not been fully tested.
'''
def __init__(self, activityName, platformName, url, dbAlias='default', campaignName=None, campaignDescription=None,
activitytypeName=None, platformColor=None, platformTypeName=None,
startDatetime=None, endDatetime=None, dataStartDatetime=None, auxCoords=None, stride=1,
grdTerrain=None, command_line_args=None):
'''
Given a URL open the url and store the dataset as an attribute of the object,
then build a set of standard names using the dataset.
The activity is saved, as all the data loaded will be a set of instantpoints
that use the specified activity.
stride is used to speed up loads by skipping data.
@param activityName: A string describing this activity
@param platformName: A string that is the name of the platform.
If that name for a Platform exists in the DB, it will be used.
        @param platformColor: An RGB hex string representing the color of the platform.
@param url: The OPeNDAP URL for the data source
@param dbAlias: The name of the database alias as defined in settings.py
@param campaignName: A string describing the Campaign in which this activity belongs.
If that name for a Campaign exists in the DB, it will be used.
@param campaignDescription: A string expanding on the campaignName.
It should be a short phrase expressing the where and why of a campaign.
@param activitytypeName: A string such as 'mooring deployment' or 'AUV mission' describing type of
activity, If that name for a ActivityType exists in the DB, it will be used.
@param platformTypeName: A string describing the type of platform, e.g.: 'mooring', 'auv'.
If that name for a PlatformType exists in the DB, it will be used.
        @param startDatetime: A Python datetime.datetime object specifying the start date time of data to load
        @param endDatetime: A Python datetime.datetime object specifying the end date time of data to load
        @param dataStartDatetime: A Python datetime.datetime object specifying the start date time of data
to append to an existing Activity
@param command_line_args.append: If true then a dataStartDatetime value will be set by looking up the last
timevalue in the database for the Activity returned by getActivityName().
A True value will override the passed parameter dataStartDatetime.
@param auxCoords: a dictionary of coordinate standard_names (time, latitude, longitude, depth)
pointing to exact names of those coordinates. Used for variables missing the
coordinates attribute.
@param stride: The stride/step size used to retrieve data from the url.
'''
self.campaignName = campaignName
self.campaignDescription = campaignDescription
self.activitytypeName = activitytypeName
self.platformName = platformName
self.platformColor = platformColor
self.dbAlias = dbAlias
self.platformTypeName = platformTypeName
self.activityName = activityName
self.requested_startDatetime = startDatetime
self.startDatetime = startDatetime
self.requested_endDatetime = endDatetime
self.endDatetime = endDatetime
self.dataStartDatetime = dataStartDatetime # For when we append data to an existing Activity
self.auxCoords = auxCoords
self.stride = stride
self.grdTerrain = grdTerrain
self.command_line_args = command_line_args
self.coord_dicts = {}
self.url = url
self.varsLoaded = []
try:
self.ds = open_url(url)
except (socket.error, pydap.exceptions.ServerError, pydap.exceptions.ClientError):
message = 'Failed in attempt to open_url("%s")' % url
self.logger.warn(message)
            # Give the calling routine the option of catching and ignoring
raise OpendapError(message)
except Exception as e:
# Prevent multiline WARNINGs in the output log files
message = str(e).split('\n')[0]
self.logger.warn(f"Failed in attempt to open_url('{url}'): {message}")
raise
self.ignored_names = list(self.global_ignored_names) # Start with copy of list of global ignored names
self.build_standard_names()
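    # A minimal instantiation sketch (the URL, names, and stride below are illustrative
    # assumptions, not values shipped with STOQS); note the class docstring advises
    # using a platform-specific subclass rather than Base_Loader directly:
    #   loader = Base_Loader(activityName='example mooring deployment',
    #                        platformName='M1', platformTypeName='mooring',
    #                        url='http://example.org/thredds/dodsC/mooring.nc',
    #                        dbAlias='default', stride=2)
    #   loader.initDB()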
def _getStartAndEndTimeFromDS(self):
'''
Examine all possible time coordinates for include_names and set the overall min and max time for the dataset.
To be used for setting Activity startDatetime and endDatetime.
'''
# TODO: Refactor to simplify. McCabe MC0001 pylint complexity warning issued.
# TODO: Parse EPIC time and time2 variables
minDT = {}
maxDT = {}
for v in self.include_names:
try:
ac = self.coord_dicts[v]
except KeyError as e:
self.logger.debug(str(e))
continue
if self.getFeatureType() == TRAJECTORY or self.getFeatureType() == TRAJECTORYPROFILE:
self.logger.debug('Getting trajectory min and max times for v = %s', v)
self.logger.debug("self.ds[ac['time']][0] = %s", self.ds[ac['time']][0])
try:
minDT[v] = from_udunits(self.ds[ac['time']].data[0][0], self.ds[ac['time']].attributes['units'])
maxDT[v] = from_udunits(self.ds[ac['time']].data[-1][0], self.ds[ac['time']].attributes['units'])
except ParserError as e:
self.logger.warn("%s. Trying to fix up time units", e)
# Tolerate units like 1970-01-01T00:00:00Z - which is found on the IOOS Glider DAC
if self.ds[ac['time']].attributes['units'] == 'seconds since 1970-01-01T00:00:00Z':
minDT[v] = from_udunits(self.ds[ac['time']].data[0][0], 'seconds since 1970-01-01 00:00:00')
maxDT[v] = from_udunits(self.ds[ac['time']].data[-1][0], 'seconds since 1970-01-01 00:00:00')
except ValueError as e:
self.logger.warn(f'Skipping load of {self.url}: {e}')
raise NoValidData(f'Could not get min and max time from {self.url}')
elif self.getFeatureType() == TIMESERIES or self.getFeatureType() == TIMESERIESPROFILE: # pragma: no cover
self.logger.debug('Getting timeseries start time for v = %s', v)
time_units = self.ds[list(self.ds[v].maps.keys())[0]].units.lower()
if time_units == 'true julian day':
self.logger.debug('Converting EPIC times to epoch seconds')
tindx = self.getTimeBegEndIndices(self.ds[list(self.ds[v].keys())[1]])
times = self.ds[list(self.ds[v].maps.keys())[0]].data[tindx[0]:tindx[-1]:self.stride]
times, time_units = self._convert_EPIC_times(times, tindx)
minDT[v] = from_udunits(times[0], time_units)
maxDT[v] = from_udunits(times[-1], time_units)
else:
minDT[v] = from_udunits(self.ds[v][ac['time']].data[0][0], self.ds[ac['time']].attributes['units'])
maxDT[v] = from_udunits(self.ds[v][ac['time']].data[-1][0], self.ds[ac['time']].attributes['units'])
else:
# Perhaps a strange file like LOPC size class data along a trajectory
minDT[v] = from_udunits(self.ds[ac['time']].data[0][0], self.ds[ac['time']].attributes['units'])
maxDT[v] = from_udunits(self.ds[ac['time']].data[-1][0], self.ds[ac['time']].attributes['units'])
self.logger.debug('minDT = %s', minDT)
self.logger.debug('maxDT = %s', maxDT)
# STOQS does not deal with data in the future and in B.C.
startDatetime = datetime.utcnow()
endDatetime = datetime(1,1,1)
for v, dt in list(minDT.items()):
try:
if dt < startDatetime:
startDatetime = dt
except NameError:
startDatetime = dt
for v, dt in list(maxDT.items()):
try:
if dt > endDatetime:
endDatetime = dt
except NameError:
endDatetime = dt
if not maxDT or not minDT:
raise NoValidData('No valid dates')
self.logger.info('Activity startDatetime = %s, endDatetime = %s', startDatetime, endDatetime)
return startDatetime, endDatetime
def initDB(self):
'''
        Do the initial Database activities that are required before the data are processed: getPlatform and createActivity.
Can be overridden by sub class. An overriding method can do such things as setting startDatetime and endDatetime.
'''
if hasattr(self, 'command_line_args'):
if hasattr(self.command_line_args, 'append') and hasattr(self.command_line_args, 'remove_appended_activities'):
if self.command_line_args.append and self.command_line_args.remove_appended_activities:
self.remove_appended_activities()
if self.checkForValidData():
self.platform = self.getPlatform(self.platformName, self.platformTypeName)
self.add_parameters(self.ds)
if hasattr(self, 'add_to_activity'):
# Allow use of existing Activity for loading additional data, e.g. Dorado plankton_proxies
self.logger.info(f"Will add these data to Activity {self.add_to_activity}")
else:
                # Ensure that startDatetime and endDatetime are defined as they are required fields of Activity
if not self.startDatetime or not self.endDatetime:
self.startDatetime, self.endDatetime = self._getStartAndEndTimeFromDS()
self.createActivity()
else:
raise NoValidData('No valid data in url %s' % (self.url))
def getmissing_value(self, var):
'''
Return the missing_value attribute for netCDF variable var
'''
mv = None
try:
mv = float(self.ds[var].attributes['missing_value'])
except KeyError:
if 'nemesis' in self.url and var in ('u', 'v'):
self.logger.debug('Special fix for nemesis data, return a standard missing_value of -1.e34')
mv = -1.0e34
else:
self.logger.debug('Cannot get attribute missing_value for variable %s from url %s', var, self.url)
except AttributeError as e:
self.logger.debug(str(e))
return mv
def get_FillValue(self, var):
'''
Return the _FillValue attribute for netCDF variable var
'''
fv = None
try:
fv = float(self.ds[var].attributes['_FillValue'])
except KeyError:
self.logger.debug('Cannot get attribute _FillValue for variable %s from url %s', var, self.url)
try:
# Fred's L_662 and other glider data files have the 'FillValue' attribute, not '_FillValue'
fv = float(self.ds[var].attributes['FillValue'])
except KeyError:
try:
# http://odss.mbari.org/thredds/dodsC/CANON/2013_Sep/Platforms/AUVs/Daphne/NetCDF/Daphne_CANON_Fall2013.nc.html has 'fill_value'
fv = float(self.ds[var].attributes['fill_value'])
except Exception as e:
self.logger.debug('Cannot get FillValue for variable %s from url %s: %s', var, self.url, str(e))
except ValueError as e:
self.logger.warn('%s for variable %s from url %s', str(e), var, self.url)
except AttributeError as e:
self.logger.debug(str(e))
return fv
def get_shape_length(self, pname):
'''Works for both pydap 3.1.1 and 3.2.0
'''
try:
shape_length = len(self.ds[pname].shape)
except AttributeError:
# Likely using pydap 3.2+
shape_length = len(self.ds[pname].array.shape)
return shape_length
def getActivityName(self):
'''Return actual Activity name that will be in the database accounting
for permutations of startDatetime and stride values per NetCDF file name.
'''
# Modify Activity name if temporal subset extracted from NetCDF file
newName = self.activityName
        if ' starting at ' not in newName:
if hasattr(self, 'requested_startDatetime') and hasattr(self, 'requested_endDatetime'):
if self.requested_startDatetime and self.requested_endDatetime:
if '(stride' in self.activityName:
first_part = self.activityName[:self.activityName.find('(stride')]
last_part = self.activityName[self.activityName.find('(stride'):]
else:
first_part = self.activityName
last_part = ''
newName = '{} starting at {} {}'.format(first_part.strip(), self.requested_startDatetime, last_part)
return newName
def getFeatureType(self):
'''
Return string of featureType from table at http://cf-pcmdi.llnl.gov/documents/cf-conventions/1.6/ch09.html.
        Accommodate previous concepts of this attribute and convert to the new discrete sampling geometry conventions in CF-1.6.
Possible return values: TRAJECTORY, TIMESERIES, TIMESERIESPROFILE, lowercase versions.
'''
conventions = ''
if hasattr(self, 'ds'):
try:
nc_global_keys = self.ds.attributes['NC_GLOBAL']
except KeyError:
self.logger.warn('Dataset does not have an NC_GLOBAL attribute! Setting featureType to "trajectory" assuming that this is an old Tethys file')
return TRAJECTORY
else:
self.logger.warn('Loader has no ds attribute. Setting featureType to "trajectory" assuming that this is an ROVCTD Loader.')
return TRAJECTORY
if 'Conventions' in nc_global_keys:
conventions = self.ds.attributes['NC_GLOBAL']['Conventions'].lower()
elif 'Convention' in nc_global_keys:
conventions = self.ds.attributes['NC_GLOBAL']['Convention'].lower()
elif 'conventions' in nc_global_keys: # pragma: no cover
conventions = self.ds.attributes['NC_GLOBAL']['conventions'].lower()
else:
conventions = ''
if 'cf-1.6' in conventions.lower():
try:
featureType = self.ds.attributes['NC_GLOBAL']['featureType']
except KeyError:
# For https://dods.ndbc.noaa.gov/thredds/dodsC/oceansites/DATA/MBARI/OS_MBARI-M1_20160829_R_TS.nc.das
featureType = self.ds.attributes['NC_GLOBAL']['cdm_data_type']
else:
# Accept earlier versions of the concept of this attribute that may be in legacy data sets
if 'cdm_data_type' in nc_global_keys:
featureType = self.ds.attributes['NC_GLOBAL']['cdm_data_type']
elif 'thredds_data_type' in nc_global_keys:
featureType = self.ds.attributes['NC_GLOBAL']['thredds_data_type']
elif 'CF%3afeatureType' in nc_global_keys:
featureType = self.ds.attributes['NC_GLOBAL']['CF%3afeatureType']
elif 'CF_featureType' in nc_global_keys:
featureType = self.ds.attributes['NC_GLOBAL']['CF_featureType']
elif 'CF:featureType' in nc_global_keys: # Seen in lrauv/*/realtime/sbdlogs files
featureType = self.ds.attributes['NC_GLOBAL']['CF:featureType']
elif 'featureType' in nc_global_keys: # Seen in roms.nc file from JPL
featureType = self.ds.attributes['NC_GLOBAL']['featureType']
else:
featureType = ''
if featureType.lower() == 'station':
# Used in elvis' TDS mooring data aggregation, it's really 'timeseriesprofile'
featureType = TIMESERIESPROFILE
if featureType.lower() == 'trajectory':
featureType = TRAJECTORY
# Put the CF-1.6 proper featureType into NC_GLOBAL so that addResources will put it into the database
self.ds.attributes['NC_GLOBAL']['featureType'] = featureType
return featureType.lower()
def _getCoordinates(self, from_variables):
'''Return tuple of (Dictionary of geospatial/temporal standard_names keyed by variable name,
Dictionary of variable names keyed by geospatial/temporal standard_names).
'''
coordSN = {}
snCoord = {}
for k in from_variables:
try:
if 'standard_name' in self.ds[k].attributes:
if self.ds[k].attributes['standard_name'] in ('time', 'latitude', 'longitude', 'depth'):
coordSN[k] = self.ds[k].attributes['standard_name']
snCoord[self.ds[k].attributes['standard_name']] = k
except KeyError:
self.logger.error(f"Could not find variable {k} in the file. Perhaps there's a problem with the coordinates attribute?")
raise
return coordSN, snCoord
def getAuxCoordinates(self, variable):
'''
        Return a dictionary of a variable's auxiliary coordinates mapped to the standard_names of 'time', 'latitude',
'longitude', and 'depth'. Accommodate previous ways of associating these variables and convert to the new
CF-1.6 conventions as outlined in Chapter 5 of the document. If an auxCoord dictionary is passed to the
Loader then that dictionary will be returned for variables that do not have a valid coordinates attribute;
this is handy for datasets that are not yet compliant.
Requirements for compliance: variables have a coordinates attribute listing the 4 geospatial/temporal
coordinates, the coordinate variables have standard_names of 'time', 'latitude', 'longitude', 'depth'.
Example return value: {'time': 'esecs', 'depth': 'DEPTH', 'latitude': 'lat', 'longitude': 'lon'}
'''
# Match items in coordinate attribute, via coordinate standard_name to coordinate name
if variable not in self.ds:
raise ParameterNotFound('Variable %s is not in dataset %s' % (variable, self.url))
coord_dict = {}
if 'coordinates' in self.ds[variable].attributes:
coords = self.ds[variable].attributes['coordinates'].split()
try:
coordSN, snCoord = self._getCoordinates(coords)
except KeyError as e:
self.logger.error(f"Could not get coordinates for {variable}. Check its coordinates attribute.")
raise VariableHasBadCoordinatesAttribute(e)
for coord in coords:
self.logger.debug(coord)
try:
self.logger.debug(snCoord)
coord_dict[coordSN[coord]] = coord
except KeyError as e:
if coord == 'trajectory':
self.logger.info(f"Found 'trajectory' in coordinates attribute. Likely a Saildrone or GliderDAC trajectory file.")
else:
raise AuxCoordMissingStandardName(e)
else:
self.logger.debug('Variable %s is missing coordinates attribute, checking if loader has specified it in auxCoords', variable)
if variable in self.auxCoords:
# Try getting it from overridden values provided
for coordSN, coord in list(self.auxCoords[variable].items()):
try:
coord_dict[coordSN] = coord
except KeyError as e:
raise AuxCoordMissingStandardName(e)
else:
self.logger.warn('%s not in auxCoords' % variable)
# Check for all 4 coordinates needed for spatial-temporal location - if any are missing raise exception with suggestion
reqCoords = set(('time', 'latitude', 'longitude', 'depth'))
self.logger.debug('coord_dict = %s', coord_dict)
if set(coord_dict.keys()) != reqCoords:
self.logger.debug('Required coordinate(s) %s missing in NetCDF file.',
list(reqCoords - set(coord_dict.keys())))
if not self.auxCoords:
raise VariableMissingCoordinatesAttribute('%s: %s missing coordinates attribute' % (self.url, variable,))
self.logger.debug('coord_dict = %s', coord_dict)
if not coord_dict or set(coord_dict.keys()) != reqCoords: # pragma: no cover
if self.auxCoords:
if variable in self.auxCoords:
# Simply return self.auxCoords if specified in the constructor
self.logger.debug('Returning auxCoords for variable %s that were specified in the constructor: %s', variable, self.auxCoords[variable])
return self.auxCoords[variable]
else:
raise ParameterNotFound('auxCoords is specified, but variable requested (%s) is not in %s' % (variable, self.auxCoords))
else:
return coord_dict
def getNominalLocation(self):
'''
For timeSeries and timeSeriesProfile data return nominal location as a tuple of (depth, latitude, longitude) as
expressed in the coordinate variables of the mooring or station. For timeSeries features depth will be a scalar,
for timeSeriesProfile depth will be an array of depths. For timeSeries and timeSeriesProfile variables with precise
longitudes and latitudes ignore them here - this method returns just the single nominal horizontal position.
'''
depths = {}
lats = {}
lons = {}
for v in self.include_names:
self.logger.debug('v = %s', v)
try:
ac = self.coord_dicts[v]
except KeyError as e:
self.logger.debug('Skipping include_name = %s: %s', v, e)
continue
# depth may be single-valued or an array
if self.getFeatureType() == TIMESERIES:
self.logger.debug('Initializing depths list for timeseries, ac = %s', ac)
try:
if 'depth' in ac:
depths[v] = self.ds[v][ac['depth']].data[:][0]
except KeyError:
self.logger.warn('No depth coordinate found for %s. Assuming EPIC scalar and assigning depth from first element', v)
depths[v] = self.ds[ac['depth']].data[0]
elif self.getFeatureType() == TIMESERIESPROFILE:
self.logger.debug('Initializing depths list for timeseriesprofile, ac = %s', ac)
try:
depths[v] = self.ds[v][ac['depth']].data[:]
except KeyError:
# Likely a TIMESERIES variable in a TIMESERIESPROFILE file (e.g. heading in ADCP file)
# look elsewhere for a nominal depth
if 'nominal_sensor_depth' in self.ds.attributes['NC_GLOBAL']:
# Hard-coded CCE EPIC nominal depth
depths[v] = [float(self.ds.attributes['NC_GLOBAL']['nominal_sensor_depth'])]
else:
self.logger.warning(f"Could not find {ac['depth']} for variable {v} in {self.url}, attempting to hard-code the depth with 'ADCP_DEPTH'")
if v == 'SW_FLUX_HR':
self.logger.info(f"Attempting to hard-code the depth with the first value from 'HR_DEPTH_0'")
depths[v] = [self.ds['HR_DEPTH_0'].data[:][0]]
else:
self.logger.info(f"Attempting to hard-code the depth with the first value from 'ADCP_DEPTH'")
depths[v] = [self.ds['ADCP_DEPTH'].data[:][0]]
elif self.getFeatureType() == TRAJECTORYPROFILE:
self.logger.debug('Initializing depths list for trajectoryprofile, ac = %s', ac)
depths[v] = self.ds[v][ac['depth']].data[:]
try:
lons[v] = self.ds[v][ac['longitude']].data[:][0]
except KeyError:
if len(self.ds[ac['longitude']].data[:]) == 1:
lons[v] = self.ds[ac['longitude']].data[:][0]
elif len(self.ds[ac['longitude']].data[:]) == 2:
                    # OASIS ADCP data has GPS_LONGITUDE and GPS_LATITUDE time series in auxiliary coordinate
                    self.logger.debug(f"Auxiliary longitude coordinate {ac['longitude']} is a variable with"
f" {len(self.ds[ac['longitude']][ac['longitude']].data[:])} points")
self.logger.info(f"Using COARDS coordinate for {v}'s longitude")
try:
lons[v] = self.ds[list(self.ds[v].maps.keys())[3]].data[:][0]
except IndexError:
self.logger.warn(f'Cannot get nominal longitude coordinate using COARDS rules: self.ds[v].keys() = {self.ds[v].keys()}')
else:
                    self.logger.warn('Variable %s has longitude auxiliary coordinate of length %d, expecting it to be 1.',
v, len(self.ds[ac['longitude']].data[:]))
try:
lats[v] = self.ds[v][ac['latitude']].data[:][0]
except KeyError:
if len(self.ds[ac['latitude']].data[:]) == 1:
lats[v] = self.ds[ac['latitude']].data[:][0]
elif len(self.ds[ac['latitude']].data[:]) == 2:
                    # OASIS ADCP data has GPS_LONGITUDE and GPS_LATITUDE time series in auxiliary coordinate
                    self.logger.debug(f"Auxiliary latitude coordinate {ac['latitude']} is a variable with"
f" {len(self.ds[ac['latitude']][ac['latitude']].data[:])} points")
self.logger.info(f"Using COARDS coordinate for {v}'s latitude")
try:
lats[v] = self.ds[list(self.ds[v].maps.keys())[2]].data[:][0]
except IndexError:
self.logger.warn(f'Cannot get nominal latitude coordinate using COARDS rules: self.ds[v].keys() = {self.ds[v].keys()}')
else:
                    self.logger.warn('Variable %s has latitude auxiliary coordinate of length %d, expecting it to be 1.',
v, len(self.ds[ac['latitude']].data[:]))
# All variables must have the same nominal location
if len(set(lats.values())) != 1 or len(set(lons.values())) != 1:
raise Exception('Invalid file coordinates structure. All variables must have'
                            ' identical nominal lat & lon, lats = %s, lons = %s' % (lats, lons))
return depths, lats, lons
def getTimeBegEndIndices(self, timeAxis):
'''
Return beginning and ending indices for the corresponding time axis indices
'''
if not getattr(self, 'startDatetime', None) and not getattr(self, 'endDatetime', None):
s = 0
e = timeAxis.shape[0]
return s, e
isEPIC = False
try:
isEPIC = 'EPIC' in self.ds.attributes['NC_GLOBAL']['Conventions'].upper()
except KeyError:
# No 'Conventions' key on 'NC_GLOBAL', check another way, e.g.
# http://dods.mbari.org/opendap/data/CCE_Archive/MS1/20151006/CTOBSTrans9m/MBCCE_MS1_CTOBSTrans9m_20151006.nc
# does not have a Conventions global attribute, so also check for time, time2 and the units
isEPIC = 'time' in self.ds.keys() and 'time2' in self.ds.keys() and self.ds['time'].attributes['units'] == 'True Julian Day'
if isEPIC:
self.logger.warn("%s does not have 'Conventions', yet appears to be EPIC from its time/time2 variables", self.url)
if isEPIC:
# True Julian dates are at noon, so take int() to match EPIC's time axis values and to satisfy:
# datum: Time (UTC) in True Julian Days: 2440000 = 0000 h on May 23, 1968
# NOTE: Decimal Julian day [days] = time [days] + ( time2 [msec] / 86400000 [msec/day] )
jbd = int(sum(gcal2jd(self.startDatetime.year, self.startDatetime.month, self.startDatetime.day)) + 0.5)
jed = int(sum(gcal2jd(self.endDatetime.year, self.endDatetime.month, self.endDatetime.day)) + 0.5)
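            # Illustrative arithmetic for the two lines above: gcal2jd() returns a pair
            # summing to the Julian Date at 00:00 UTC (2400000.5 + MJD), so adding 0.5
            # and truncating gives the noon-referenced integer Julian day that EPIC
            # stores in its time axis; the sub-day remainder is carried in time2 as
            # milliseconds since midnight, per the formula noted above.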
t_indx = np.where((jbd <= timeAxis) & (timeAxis <= jed))[0]
if not t_indx.any():
raise NoValidData('No data from %s for time values between %s and %s. Skipping.' % (self.url,
self.startDatetime, self.endDatetime))
            # Refine indices with fractional portion of the day (ms since midnight) as represented in the time2 variable
bms = 0
if self.startDatetime.hour or self.startDatetime.minute or self.startDatetime.second:
bms = self.startDatetime.hour * 3600000 + self.startDatetime.minute * 60000 + self.startDatetime.second * 1000
ems = 86400000
if self.endDatetime.hour or self.endDatetime.minute or self.endDatetime.second:
ems = self.endDatetime.hour * 3600000 + self.endDatetime.minute * 60000 + self.endDatetime.second * 1000
# Tolerate datasets that begin or end inside the limits of self.startDatetime and self.endDatetime
beg_day_indices = np.where(jbd == timeAxis)[0]
t2_indx_beg = 0
if beg_day_indices.any():
time2_axis_beg = self.ds['time2']['time2'][beg_day_indices[0]:beg_day_indices[-1]]
try:
t2_indx_beg = np.where(bms <= time2_axis_beg)[0][0]
except IndexError:
# Likely no bms <= time2_axis_beg, leave t2_indx_beg = 0
pass
end_day_indices = np.where(jed == timeAxis)[0]
t2_indx_end = 0
if end_day_indices.any():
if end_day_indices[0] > 0 and (end_day_indices[0] == end_day_indices[-1]):
time2_axis_end = self.ds['time2']['time2'][int(end_day_indices[0]) - 1:int(end_day_indices[-1])]
else:
time2_axis_end = self.ds['time2']['time2'][int(end_day_indices[0]):int(end_day_indices[-1])]
try:
t2_indx_end = len(time2_axis_end) - np.where(ems >= time2_axis_end)[0][-1]
except IndexError:
                        # Likely ems is less than the sampling interval, resulting in an empty np.where(ems >= time2_axis_end)
t2_indx_end = len(time2_axis_end)
indices = t_indx[0] + t2_indx_beg, t_indx[-1] - t2_indx_end
return indices
timeAxisUnits = timeAxis.units.lower()
timeAxisUnits = timeAxisUnits.replace('utc', 'UTC') # coards requires UTC to be upper case
        if timeAxis.units == 'seconds since 1970-01-01T00:00:00Z' or timeAxis.units == 'seconds since 1970/01/01 00:00:00Z':
timeAxisUnits = 'seconds since 1970-01-01 00:00:00' # coards doesn't like ISO format
if self.startDatetime:
self.logger.debug('self.startDatetime, timeAxis.units = %s, %s', self.startDatetime, timeAxis.units)
s = to_udunits(self.startDatetime, timeAxisUnits)
self.logger.debug("For startDatetime = %s, the udnits value is %f", self.startDatetime, s)
if self.dataStartDatetime:
# Override s if self.dataStartDatetime is specified
self.logger.debug('self.dataStartDatetime, timeAxis.units = %s, %s', self.dataStartDatetime, timeAxis.units)
s = to_udunits(self.dataStartDatetime, timeAxisUnits)
self.logger.debug("For dataStartDatetime = %s, the udnits value is %f", self.dataStartDatetime, s)
if self.requested_endDatetime:
# endDatetime may be None, in which case just read until the end
e = to_udunits(self.endDatetime, timeAxisUnits)
self.logger.debug("For endDatetime = %s, the udnits value is %f", self.endDatetime, e)
else:
e = timeAxis[-1]
self.logger.debug("requested_endDatetime not given, using the last value of timeAxis = %f", e.data[0])
tf = np.array([])
if getattr(self, 'command_line_args', False):
if self.command_line_args.append:
# Exclusive of s, as that is the max timevalue in the database for the Activity
self.logger.info(f"--append specified. Finding start index where time > {s}")
tf = (s < timeAxis) & (timeAxis <= e)
if not tf.any():
# Inclusive of the specified start time
tf = (s <= timeAxis) & (timeAxis <= e)
# Numpy Array tf has True values at indices corresponding to the data we need to load
tIndx = np.nonzero(tf == True)[0]
if tIndx.size == 0:
raise NoValidData(f'No time values from {self.url} between time values {s} and {e}')
elif tIndx.size == 1:
# Loading a single value
tIndx = np.array([tIndx[0], tIndx[0]])
try:
indices = (tIndx[0], tIndx[-1] + 1)
except IndexError:
raise NoValidData('Could not get first and last indexes from tIndex = %s. Skipping.' % (tIndx))
self.logger.info('Start and end indices are: %s', indices)
if indices[1] <= indices[0]:
            raise InvalidSliceRequest(f'Cannot issue DAP temporal constraint expression of non-positive slice: indices = {indices}')
return indices
def getTotalRecords(self):
'''
For the url count all the records that are to be loaded from all the include_names and return it.
Computes the sum of the product of the time slice and the rest of the elements of the shape.
'''
pcount = 0
count = 0
numDerived = 0
trajSingleParameterCount = 0
for name in self.include_names:
try:
tIndx = self.getTimeBegEndIndices(self.ds[self.coord_dicts[name]['time']])
except KeyError:
self.logger.debug('Ignoring parameter: %s', name)
except InvalidSliceRequest:
self.logger.warn('No valid data for parameter: %s', name)
continue
except KeyError as e:
self.logger.warn("%s: Skipping", e)
continue
try:
if self.getFeatureType() == TRAJECTORY:
try:
trajSingleParameterCount = np.prod(self.ds[name].shape[1:] + (np.diff(tIndx)[0],))
except AttributeError:
# Likely using pydap 3.2+
trajSingleParameterCount = np.prod(self.ds[name].array.shape[1:] + (np.diff(tIndx)[0],))
try:
pcount = (np.prod(self.ds[name].shape[1:] + (np.diff(tIndx)[0],)) / self.stride)
count += pcount
except AttributeError:
# Likely using pydap 3.2+
pcount = (np.prod(self.ds[name].array.shape[1:] + (np.diff(tIndx)[0],)) / self.stride)
count += pcount
except KeyError as e:
if self.getFeatureType() == TRAJECTORY:
                    # Assume that it's a derived variable and add the same count as a measured trajectory parameter
self.logger.debug("%s: Assuming it's a derived parameter", e)
numDerived += 1
self.logger.info(f'Count of parameter {name:20}: {int(pcount):7d}')
self.logger.debug('Adding %d derived parameters of length %d to the count', numDerived, trajSingleParameterCount / self.stride)
if trajSingleParameterCount:
count += (numDerived * trajSingleParameterCount / self.stride)
return count
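    # Illustrative arithmetic for the count above (hypothetical numbers): a 1-D
    # trajectory variable whose time slice spans 50000 records loaded with stride=10
    # contributes 50000 / 10 = 5000 records, and each derived parameter adds the same
    # per-variable count again via trajSingleParameterCount.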
def _equal_coords(self, load_groups, coor_groups):
'''Peek at the data in the axes and mark with True values those elements that match.
This is a special fix for realtime LRAUV data from shore_i.nc files.
Tested with:
1. Initial short mission
http://dods.mbari.org/opendap/data/lrauv/whoidhs/realtime/sbdlogs/2019/201906/20190609T194744/shore_i.nc
2. Unequal array lengths
http://dods.mbari.org/opendap/data/lrauv/whoidhs/realtime/sbdlogs/2019/201906/20190609T202208/shore_i.nc
3. Very unequal lengths, pad with 41 zeros; fails with duplicate key value
http://dods.mbari.org/opendap/data/lrauv/whoidhs/realtime/sbdlogs/2019/201906/20190612T024430/shore_i.nc
4. Horrendously bad result with coordinates and data represented badly in STOQS UI section plots.
(Implemented temporary fix by not loading salinity; problem occurs with loading both temperature & salinity)
http://dods.mbari.org/opendap/data/lrauv/makai/realtime/sbdlogs/2020/202010/20201008T014813/shore_i.nc
The role of this method is to identify truly equal coordinates of variables to be loaded for the
calling routine to determine whether a bulk_create() may be done or whether the variables need to be
loaded the old fashioned (slower) way - one element at a time, reusing previously loaded coordinates.
        N.B.: In January 2022 stoqs/loaders/CANON/toNetCDF/lrauvNc4ToNetcdf.py was modified to re-interpolate
        the decimated data to '2S' frequency and use common coordinate axes for all variables - so this function
isn't really needed for those new shore_i.nc files.
'''
coord_equals = {}
if len(coor_groups) == 1:
self.logger.info(f"Single set of coordinates as would be found in a modern shore_i.nc file")
return coord_equals
for count, (axes, ac) in enumerate(coor_groups.items()):
self.logger.info(f"Initializing coord_equals to all False for axes {axes}")
self.logger.info(f"Number of {ac[TIME]} values: {len(self.ds[ac[TIME]])}")
coord_equals[axes] = np.full(len(self.ds[ac[TIME]]), False)
variable = load_groups[axes][0]
if count > 0:
if len(last_times) < len(self.ds[ac[TIME]]):
self.logger.info(f"len(last_times) ({len(last_times)}) < len(self.ds[ac[TIME]]) ({len(self.ds[ac[TIME]])})")
num_pad = len(self.ds[ac[TIME]]) - len(last_times)
self.logger.info(f"Padding last_ coordinate arrays with {num_pad} zero(s) to match (taking a chance) the self.ds coordinate arrays")
last_times = np.pad(last_times, [(0, num_pad)], mode='constant', constant_values=0)
last_depths = np.pad(last_depths, [(0, num_pad)], mode='constant', constant_values=0)
last_latitudes = np.pad(last_latitudes, [(0, num_pad)], mode='constant', constant_values=0)
last_longitudes = np.pad(last_longitudes, [(0, num_pad)], mode='constant', constant_values=0)
if len(last_times) > len(self.ds[ac[TIME]]):
self.logger.warn(f"len(last_times) ({len(last_times)}) > len(self.ds[ac[TIME]]) ({len(self.ds[ac[TIME]])})")
self.logger.warn(f"Not Padding self.ds arrays - not able to attempt a fix")
continue
self.logger.debug(f"Comparing coords with those from {last_variables}")
times_equal = np.equal(last_times, self.ds[ac[TIME]])
self.logger.debug(f" {variable} times: {times_equal}")
self.logger.debug(f" {list(last_times[:])}")
self.logger.debug(f" {list(self.ds[ac[TIME]])}")
depths_equal = np.equal(last_depths, self.ds[ac[DEPTH]][ac[DEPTH]])
self.logger.debug(f" {variable} depths: {depths_equal}")
self.logger.debug(f" {list(last_depths[:])}")
self.logger.debug(f" {list(self.ds[ac[DEPTH]][ac[DEPTH]])}")
latitudes_equal = np.equal(last_latitudes, self.ds[ac[LATITUDE]][ac[LATITUDE]])
self.logger.debug(f" {variable} latitudes: {latitudes_equal}")
self.logger.debug(f" {list(last_latitudes[:])}")
self.logger.debug(f" {list(self.ds[ac[LATITUDE]][ac[LATITUDE]])}")
longitudes_equal = np.equal(last_longitudes, self.ds[ac[LONGITUDE]][ac[LONGITUDE]])
self.logger.debug(f" {variable} longitudes: {longitudes_equal}")
self.logger.debug(f" {list(last_longitudes[:])}")
self.logger.debug(f" {list(self.ds[ac[LONGITUDE]][ac[LONGITUDE]])}")
coord_equals[axes] = np.logical_and(np.logical_and(times_equal,
depths_equal),
np.logical_and(latitudes_equal,
longitudes_equal))
self.logger.debug(f" {variable} .logical_and(): {coord_equals[axes]}")
last_times = self.ds[ac[TIME]]
last_depths = self.ds[ac[DEPTH]][ac[DEPTH]]
last_latitudes = self.ds[ac[LATITUDE]][ac[LATITUDE]]
last_longitudes = self.ds[ac[LONGITUDE]][ac[LONGITUDE]]
last_variables = load_groups[axes]
return coord_equals
def get_load_structure(self):
'''Return data structure organized by Parameters with common coordinates.
This supports the use of bulk_create() to speed the loading of data.
'''
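        # Illustrative sketch of the returned structures (coordinate and parameter names are hypothetical):
        #   load_groups = {'depthlatitudelongitudetime': ['temperature', 'salinity'], ...}
        #   coor_groups = {'depthlatitudelongitudetime': {'time': 'time', 'depth': 'depth',
        #                                                 'latitude': 'latitude', 'longitude': 'longitude'}, ...}
        # The key is the sorted concatenation of a parameter's coordinate names, so parameters that
        # share coordinates are grouped and their coordinates are bulk loaded only once.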
ac = {}
load_groups = defaultdict(list)
coor_groups = {}
for pname in self.include_names:
if pname not in list(self.ds.keys()):
self.logger.debug('include_name %s not in dataset %s', pname, self.url)
continue
ac[pname] = self.coord_dicts[pname]
try:
load_groups[''.join(sorted(list(ac[pname].values())))].append(pname)
coor_groups[''.join(sorted(list(ac[pname].values())))] = ac[pname]
except TypeError:
# Likely "TypeError: '<' not supported between instances of 'float' and 'str'" because depth = 0.0 in auxCoords
self.logger.debug(f'Number likely in auxCoords rather than a coordinate name, convert to string for group_name')
group_name = ''
for v in ac[pname].values():
group_name += str(v)
self.logger.debug(f'group_name = {group_name}')
load_groups[group_name].append(pname)
coor_groups[group_name] = ac[pname]
return load_groups, coor_groups
def _ips(self, mtimes):
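        # Yield an unsaved InstantPoint for each valid time and None for each bad one so that
        # downstream masks stay index-aligned with the coordinate arrays.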
for i, mt in enumerate(mtimes):
if mt:
yield InstantPoint(activity=self.activity, timevalue=mt)
else:
self.logger.debug(f"Bad timevalue from {self.url} at index {i}")
yield None
def _meass(self, depths, longitudes, latitudes):
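        # Yield an unsaved Measurement for each good (depth, longitude, latitude) triple and None
        # for each bad one, keeping the output index-aligned with the coordinate arrays.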
for i, (de, lo, la) in enumerate(zip(depths, longitudes, latitudes)):
# Accept depths that are 0.0, but not latitudes and longitudes that are zero
if de is not None and lo and la:
yield Measurement(depth=repr(de), geom=Point(float(lo), float(la)))
else:
self.logger.debug(f"Bad coordinate from {self.url} at index {i}")
yield None
def _find_dup_coords(self, ips, meass, coords_equal):
for index, (ip, meas) in enumerate(zip(ips, meass)):
if meas:
try:
measurement = Measurement.objects.using(self.dbAlias).filter(depth=meas.depth,
geom=meas.geom,
instantpoint=ip)
self.logger.info(f"Adding index {index} to coords_equal for meas = {meas}")
coords_equal[index] = True
except Measurement.DoesNotExist:
continue
return coords_equal
def _all_coords_equal(self, tindx, ac, pnames, axes, coords_equal=np.array([])):
        '''If a duplicate coordinate is found in the database then this is for testing whether
all the coordinates in the data to be loaded are identical with an Activity
already in the database. Initially implemented to add plankton_proxy data
to an existing Dorado Activity.
'''
def _read_coords_from_ds(self, tindx, ac, multidim_trajectory=False):
'''Initial implementations assume a single trajectory in each netCDF file.
With adoption of CF-1.7 " It is strongly recommended that there always be a
trajectory variable (of any data type) with the attribute cf_role=”trajectory_id”
attribute, whose values uniquely identify the trajectories."
http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/cf-conventions.html#trajectory-data
The multidim_trajectory flag is for indicating a netCDF file that has a
Multidimensional array representation of trajectories.
'''
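        # Returns the tuple (mtimes, depths, latitudes, longitudes), each sliced by tindx and
        # self.stride; mtimes holds Python datetimes converted from the file's time units.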
if multidim_trajectory:
# TODO: Deal with (as yet unseen) case where multiple trajectories exist in a netCDF file
times = self.ds[ac[TIME]][0][0][tindx[0]:tindx[-1]:self.stride]
else:
times = self.ds[ac[TIME]][tindx[0]:tindx[-1]:self.stride]
time_units = self.ds[ac[TIME]].units.lower().replace('utc', 'UTC')
if self.ds[ac[TIME]].units == 'seconds since 1970-01-01T00:00:00Z':
time_units = 'seconds since 1970-01-01 00:00:00' # coards doesn't like ISO format
try:
if times.shape[0] > 0:
mtimes = (from_udunits(mt, time_units) for mt in times)
except IndexError:
# Trap case where times.shape = () giving opportunity to turn a single value into a list
mtimes = [from_udunits(float(times.data), time_units)]
try:
if isinstance(self.ds[ac[DEPTH]], pydap.model.GridType):
depths = self.ds[ac[DEPTH]][ac[DEPTH]][tindx[0]:tindx[-1]:self.stride]
else:
depths = self.ds[ac[DEPTH]][tindx[0]:tindx[-1]:self.stride]
except KeyError:
# Allow for variables with no depth coordinate to be loaded at the depth specified in auxCoords
if ac[DEPTH] in self.ds:
if isinstance(ac[DEPTH], (int, float)):
depths = ac[DEPTH] * np.ones(len(times))
else:
self.logger.warn(f'No depth coordinate {ac[DEPTH]} in {self.ds}')
if isinstance(ac[DEPTH], (int, float)):
if multidim_trajectory:
                        self.logger.info(f'Overridden in auxCoords: ac[DEPTH] = {ac[DEPTH]}, setting depths to [{ac[DEPTH]}] * len(times)')
depths = [ac[DEPTH]] * len(times)
else:
                        self.logger.info(f'Overridden in auxCoords: ac[DEPTH] = {ac[DEPTH]}, setting depths to [{ac[DEPTH]}]')
depths = [ac[DEPTH]]
if isinstance(self.ds[ac[LATITUDE]], pydap.model.GridType):
latitudes = self.ds[ac[LATITUDE]][ac[LATITUDE]][tindx[0]:tindx[-1]:self.stride]
elif multidim_trajectory:
# TODO: Deal with (as yet unseen) case where multiple trajectories exist in a netCDF file
latitudes = self.ds[ac[LATITUDE]][0][0][tindx[0]:tindx[-1]:self.stride]
else:
latitudes = self.ds[ac[LATITUDE]][tindx[0]:tindx[-1]:self.stride]
try:
if latitudes.shape[0] > 0:
pass
except IndexError:
# Trap case where latitudes.shape = () giving opportunity to turn a single value into a list
latitudes = [float(latitudes.data)]
if isinstance(self.ds[ac[LONGITUDE]], pydap.model.GridType):
longitudes = self.ds[ac[LONGITUDE]][ac[LONGITUDE]][tindx[0]:tindx[-1]:self.stride]
elif multidim_trajectory:
# TODO: Deal with (as yet unseen) case where multiple trajectories exist in a netCDF file
longitudes = self.ds[ac[LONGITUDE]][0][0][tindx[0]:tindx[-1]:self.stride]
else:
longitudes = self.ds[ac[LONGITUDE]][tindx[0]:tindx[-1]:self.stride]
try:
if longitudes.shape[0] > 0:
pass
except IndexError:
# Trap case where longitudes.shape = () giving opportunity to turn a single value into a list
longitudes = [float(longitudes.data)]
return mtimes, depths, latitudes, longitudes
def _load_coords_from_dsg_ds(self, tindx, ac, pnames, axes, coords_equal=np.array([]), multidim_trajectory=False):
'''Pull coordinates from Discrete Sampling Geometry NetCDF dataset,
        (with accommodations made so that it works as well for EPIC conventions)
and bulk create in the database. Retain None values for bad coordinates.
'''
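        # Returns (meass, dup_times, mask): the bulk created Measurement objects, per-element flags
        # for duplicate times, and a mask marking elements skipped for bad or duplicate coordinates.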
mtimes, depths, latitudes, longitudes = self._read_coords_from_ds(tindx, ac, multidim_trajectory=multidim_trajectory)
self.logger.debug(f'Getting good_coords for {pnames}...')
mtimes, depths, latitudes, longitudes, dup_times = zip(*self.good_coords(
pnames, mtimes, depths, latitudes, longitudes, coords_equal))
# Reassign meass with Measurement objects that have their id set
try:
meass, mask = self._bulk_load_coordinates(self._ips(mtimes), self._meass(
depths, longitudes, latitudes),
dup_times, ac, axes)
except (UniqueViolation, IntegrityError) as e:
# Likely a realtime LRAUV load with a coord already loaded - add the dup to coords_equal
self.logger.info(f"{e}: Trying _bulk_load_coordinates() again after _find_dup_coords()")
coords_equal = self._find_dup_coords(self._ips(mtimes), self._meass(
depths, longitudes, latitudes),
coords_equal)
mtimes, depths, latitudes, longitudes, dup_times = zip(*self.good_coords(
pnames, mtimes, depths, latitudes, longitudes, coords_equal))
meass, mask = self._bulk_load_coordinates(self._ips(mtimes), self._meass(
depths, longitudes, latitudes),
dup_times, ac, axes)
return meass, dup_times, mask
def _load_coords_from_instr_ds(self, tindx, ac):
'''Pull time coordinate from Instrument (time-coordinate-only) NetCDF dataset (e.g. LOPC),
        look up the matching Measurement (containing depth, latitude, and longitude) and bulk create
Instantpoints and Measurements in the database.
'''
meass_nodups = []
try:
times = self.ds[ac[TIME]][tindx[0]:tindx[-1]:self.stride]
except ValueError:
self.logger.warn(f'Stride of {self.stride} likely greater than range of data: {tindx[0]}:{tindx[-1]}')
self.logger.warn(f'Skipping load of {self.url}')
return meass_nodups
time_units = self.ds[ac[TIME]].units.lower().replace('utc', 'UTC')
if self.ds[ac[TIME]].units == 'seconds since 1970-01-01T00:00:00Z':
            time_units = 'seconds since 1970-01-01 00:00:00'    # coards doesn't like ISO format
mtimes = (from_udunits(mt, time_units) for mt in times)
warn_secs_diff = 2
noload_secs_diff = 60
ips = []
meass = []
warn_count = 0
noload_count = 0
for mt in mtimes:
try:
ip, secs_diff = get_closest_instantpoint(self.associatedActivityName, mt, self.dbAlias)
except ClosestTimeNotFoundException as e:
self.logger.error('Could not find corresponding measurment for LOPC data measured at %s', mt)
else:
if secs_diff > noload_secs_diff:
noload_count += 1
self.logger.debug(f"{noload_count:3d}. LOPC data at {mt.strftime('%Y-%m-%d %H:%M:%S')} not loaded - more than "
f"{noload_secs_diff} secs away from existing measurement: {secs_diff}")
continue
if secs_diff > warn_secs_diff:
warn_count += 1
self.logger.debug(f"{warn_count:3d}. LOPC data at {mt.strftime('%Y-%m-%d %H:%M:%S')} more than "
f"{warn_secs_diff} secs away from existing measurement: {secs_diff}")
meass.append(Measurement.objects.using(self.dbAlias).get(instantpoint=ip))
self.logger.warn(f"{noload_count} of {len(times)} original LOPC measurements not loaded because they "
f"were more than {noload_secs_diff} seconds away from an existing measurement")
self.logger.warn(f"{warn_count} of {len(meass)} collected LOPC measurements were more than "
f"{noload_secs_diff} seconds away from an existing measurement")
if not meass:
return meass_nodups
# Remove duplicates leaving the meass_nodups ordered in time
duplicates_removed = -1
meass_nodups.append(meass[0])
last_meas = meass[0]
for meas in meass:
if meas.instantpoint.timevalue > last_meas.instantpoint.timevalue:
meass_nodups.append(meas)
else:
duplicates_removed += 1
last_meas = meas
self.logger.info(f'{duplicates_removed} duplicate Measurements removed')
return meass_nodups
def _good_value_generator(self, pname, values):
'''Generate good data values where bad values and nans are replaced consistently with None
'''
for value in values:
if self.is_value_bad(pname, value):
value = None
yield value
def _mask_data(self, vd, vm):
# Yield only good values (not masked)
good_count = 0
for i, (v, m) in enumerate(zip(vd, vm)):
if not m:
yield v
good_count += 1
else:
self.logger.debug(f"Removing bad data value at index {i}")
if good_count == 0:
self.logger.warning(f"No good data yielded. Coordinate values in {self.url} are likely bad.")
def _meass_from_activity(self, add_to_activity, tindx, ac):
        '''Retrieve Measurements from existing Activity and confirm that the coordinates
are identical to what's in the netCDF we are loading from. Initially developed
for Dorado plankton_proxies data.
'''
meass = (Measurement.objects.using(self.dbAlias).filter(instantpoint__activity=add_to_activity)
.order_by('instantpoint__timevalue'))
dup_times = [False] * meass.count()
mask = [False] * meass.count()
unequal_ti = unequal_de = unequal_la = unequal_lo = 0
for count, (meas, mt, de, la, lo) in enumerate(zip(meass, *self._read_coords_from_ds(tindx, ac))):
if meas.instantpoint.timevalue != mt:
ti_msg = f"Existing timevalue ({meas.instantpoint.timevalue}) != mt ({mt}) at index {count}"
self.logger.debug(ti_msg)
unequal_ti += 1
if unequal_ti == 1:
first_ti_msg = ti_msg
if not np.isclose(meas.depth, de):
de_msg = f"Existing depth ({meas.depth}) != de ({de}) at index {count}"
self.logger.debug(de_msg)
unequal_de += 1
if not np.isclose(meas.geom.y, la):
la_msg = f"Existing latitude ({meas.geom.y}) != la ({la}) at index {count}"
self.logger.debug(la_msg)
unequal_la += 1
if not np.isclose(meas.geom.x, lo):
lo_msg = f"Existing longitude ({meas.geom.x}) != lo ({lo}) at index {count}"
self.logger.debug(lo_msg)
unequal_lo += 1
if unequal_ti:
self.logger.error(f"Encountered {unequal_ti} unequal_ti when adding data from {self.url} to Activity {add_to_activity}")
self.logger.error(f"First time mismatch: {first_ti_msg}")
self.logger.error(f"Last time mismatch: {ti_msg}")
return meass, dup_times, mask
def load_trajectory(self, add_to_activity=None):
'''Stream trajectory data directly from pydap proxies to generators fed to bulk_create() calls
'''
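        # Flow (sketch): group parameters by shared coordinate axes, bulk load the coordinates
        # (InstantPoints and Measurements) once per group, then stream each parameter's values
        # through generators into MeasuredParameter.objects.bulk_create().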
multidim_trajectory = False
load_groups, coor_groups = self.get_load_structure()
coords_equal_hash = {}
if 'shore_i.nc' in self.url:
try:
# Variables from same NetCDF4 group in realtime LRAUV data have different axis names,
# but same coord values. Find them to not load duplicate measurements.
coords_equal_hash = self._equal_coords(load_groups, coor_groups)
except ValueError as e:
self.logger.warning(f"Skipping {self.url}: {e}")
total_loaded = 0
mask = []
for axis_count, (k, pnames) in enumerate(load_groups.items()):
ac = coor_groups[k]
try:
if len(self.ds[ac[TIME]].shape) == 2:
multidim_trajectory = True
# TODO: Deal with (as yet unseen) case where multiple trajectories exist in a netCDF file
tindx = self.getTimeBegEndIndices(self.ds[ac[TIME]][0][0])
else:
tindx = self.getTimeBegEndIndices(self.ds[ac[TIME]])
except (InvalidSliceRequest, NoValidData) as e:
self.logger.warn(f"{e}")
self.logger.warn(f'Failed to getTimeBegEndIndices() for axes {k} from {self.url}')
continue
for i, pname in enumerate(pnames):
self.logger.debug(f'{i}, {pname}')
if i == 0:
# First time through, bulk load the coordinates: instant_points and measurements
if DEPTH not in ac:
self.logger.warn(f'{self.param_by_key[pname]} does not have {DEPTH} in {ac}. Skipping.')
continue
if ac[DEPTH] not in self.ds and isinstance(ac[DEPTH], (int, float)):
# Likely u and v parameters from nemesis glider data where there is no depth_uv coordinate in the NetCDF
self.logger.info(f'{self.param_by_key[pname]} does not have {DEPTH} in {self.url}.')
self.logger.info(f'ac[DEPTH] = {ac[DEPTH]}. Assume that this depth coordinate was provided in auxCoords')
self.logger.info(f'Loading coordinates for axes {k}')
meass, dup_times, mask = self._load_coords_from_dsg_ds(tindx, ac, pnames, k, multidim_trajectory=multidim_trajectory)
elif ac[DEPTH] in self.ds and ac[LATITUDE] in self.ds and ac[LONGITUDE] in self.ds:
try:
# Expect CF Discrete Sampling Geometry or EPIC dataset
self.logger.info(f'Loading coordinates for axes {k}')
if coords_equal_hash == {}:
if add_to_activity:
meass, dup_times, mask = self._meass_from_activity(add_to_activity, tindx, ac)
else:
meass, dup_times, mask = self._load_coords_from_dsg_ds(tindx, ac, pnames, k)
else:
meass, dup_times, mask = self._load_coords_from_dsg_ds(tindx, ac, pnames, k, coords_equal_hash[k])
except CoordNotEqual as e:
self.logger.exception(e)
sys.exit(-1)
except ValueError as e:
# Likely ValueError: not enough values to unpack (expected 5, got 0) from good_coords()
self.logger.debug(str(e))
self.logger.warn(f'No good coordinates for {pname} - skipping it')
continue
except OverflowError as e:
# Likely unable to convert a udunit to a value as in time from:
# http://legacy.cencoos.org:8080/thredds/dodsC/gliders/Line66/Nemesis/nemesis_201705/nemesis_20170518T203246_rt0.nc.ascii?time[149:1:149]
# = -4.31865376e+107 (should be a value like 1.495143822559231E9)
self.logger.debug(str(e))
self.logger.warn(f'OverflowError when converting coordinates for {pname} - skipping it')
return total_loaded
else:
# Expect instrument (time-coordinate-only) dataset
self.logger.warn(f'{pname} has no {ac[DEPTH]} coordinate - processing as time-coordinate-only, e.g. LOPC')
meass = self._load_coords_from_instr_ds(tindx, ac)
else:
# Parameters after the first one
if k in coords_equal_hash:
if coords_equal_hash[k].all():
# For follow-on Parameters using same axes, pass in equal coordinates boolean array
meass, dup_times, mask = self._load_coords_from_dsg_ds(tindx, ac, pnames, k, coords_equal_hash[k])
else:
# Load Parameter one element at a time - the old fashioned (slower) way
self.logger.warning(f"Parameter {pname} does not share the same coordinates of previously loaded Parameters, skipping for now.")
self.logger.debug(f"coords_equal_hash[{k}] = {coords_equal_hash[k]}")
continue
# TODO: Implement one element at a time loader method
try:
if isinstance(self.ds[pname], pydap.model.GridType):
constraint_string = f"(GridType) using python slice: ds['{pname}']['{pname}'][{tindx[0]}:{tindx[-1]}:{self.stride}]"
values = self.ds[pname][pname].data[tindx[0]:tindx[-1]:self.stride]
elif multidim_trajectory:
self.logger.info(f"(multidim) loading {pname} from multidimensional trajectory file")
constraint_string = f"using python slice: ds['{pname}'][0][0][{tindx[0]}:{tindx[-1]}:{self.stride}]"
# TODO: Deal with (as yet unseen) case where multiple trajectories exist in a netCDF file
values = self.ds[pname].data[0][0][tindx[0]:tindx[-1]:self.stride]
else:
constraint_string = f"(default) using python slice: ds['{pname}'][{tindx[0]}:{tindx[-1]}:{self.stride}]"
values = self.ds[pname].data[tindx[0]:tindx[-1]:self.stride]
except ValueError:
self.logger.warn(f'Stride of {self.stride} likely greater than range of data: {tindx[0]}:{tindx[-1]}')
self.logger.warn(f'Skipping load of {self.url}')
return total_loaded
# Test whether we need to make values iterable
try:
self.logger.debug(f"len(values) = {len(values)}")
except TypeError:
# Likely values is a single valued array, e.g. nemesis u, v data
values = [float(values)]
if mask:
# Mask the values and dup_times where coordinates are bad
# Need values as a list() because of LOPC test below
values = list(self._mask_data(values, mask))
if not values:
self.logger.warning(f'Coordinates likely bad - check them here:')
self.logger.warning(f"Depth data: {self.url}.ascii?{ac[DEPTH]}[{tindx[0]}:{self.stride}:{tindx[-1] - 1}]")
self.logger.warning(f"Latitude data: {self.url}.ascii?{ac[LATITUDE]}[{tindx[0]}:{self.stride}:{tindx[-1] - 1}]")
self.logger.warning(f"Longitude data: {self.url}.ascii?{ac[LONGITUDE]}[{tindx[0]}:{self.stride}:{tindx[-1] - 1}]")
return total_loaded
self.logger.info(f"Time data: {self.url}.ascii?{ac[TIME]}[{tindx[0]}:{self.stride}:{tindx[-1] - 1}]")
if hasattr(values[0], '__iter__'):
# For data like LOPC data - expect all values to be non-nan, load array and the sum of it
self.param_by_key[pname].description = 'Sum of counts saved in datavalue, spectrum of counts saved in dataarray'
self.param_by_key[pname].save(using=self.dbAlias)
mps = (MeasuredParameter(measurement=me, parameter=self.param_by_key[pname],
dataarray=list(va), datavalue=sum(va))
for me, va in zip(meass, values))
else:
# Need to bulk_create() all values, set bad ones to None and remove them after insert
values = self._good_value_generator(pname, values)
mps = (MeasuredParameter(measurement=me, parameter=self.param_by_key[pname],
datavalue=va) for me, va, dt in zip(
meass, values, dup_times) if not dt)
                # meass is a concrete list (the other items are generators), so we can call len() on it
self.logger.info(f'Bulk loading {len(meass)} {self.param_by_key[pname]} datavalues into MeasuredParameter {constraint_string} with batch_size = {BATCH_SIZE}')
mps = self._measuredparameter_with_measurement(meass, mps)
mps = MeasuredParameter.objects.using(self.dbAlias).bulk_create(mps, batch_size=BATCH_SIZE)
self.parameter_counts[self.param_by_key[pname]] = len(mps)
total_loaded += len(mps)
return total_loaded
def _convert_EPIC_times(self, times, tindx):
# Create COARDS time from EPIC data
time2s = self.ds['time2']['time2'].data[tindx[0]:tindx[-1]:self.stride]
time_units = 'seconds since 1970-01-01 00:00:00'
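        # Illustrative conversion with hypothetical values: time=2457300 (true Julian day) and
        # time2=43200000 ms give jd2gcal(2457299.5, 0.5) -> (2015, 10, 4, 0.5), i.e.
        # 2015-10-04 12:00:00 UTC, which to_udunits() then converts to epoch seconds.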
epoch_seconds = []
for jd, ms in zip(times, time2s):
gcal = jd2gcal(jd - 0.5, ms / 86400000.0)
try:
gcal_datetime = datetime(*gcal[:3]) + timedelta(days=gcal[3])
except ValueError as e:
# Encountered this error after removing start & end times for the load on this dataset:
# http://dods.mbari.org/opendap/data/CCE_Archive/MS3/20151005/Aquadopp2000/MBCCE_MS3_Aquadopp2000_20151005.nc.ascii?time[93900:1:94100]
self.logger.debug(f"{e} in {self.url}")
epoch_seconds.append(to_udunits(gcal_datetime, time_units))
return epoch_seconds, time_units
def load_timeseriesprofile(self):
'''Stream timeseriesprofile data directly from pydap proxies to generators fed to bulk_create() calls.
Used also for timeseries data.
'''
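        # Flow (sketch): for each group of parameters sharing coordinates, read times and depths
        # (CF/COARDS first, with EPIC fallbacks), build Measurements for every time/depth pair,
        # then bulk_create() the flattened data values for each parameter.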
time_axes_loaded = set()
depth_axes_loaded = set()
load_groups, coor_groups = self.get_load_structure()
for k, pnames in load_groups.items():
ac = coor_groups[k]
total_loaded = 0
for i, pname in enumerate(pnames):
if i == 0:
# First time through, bulk load the coordinates: instant_points and measurements
# As all pnames share the same coordinates we can use pnames[0] to access them
firstp = pnames[0]
if ac[TIME] != list(self.ds[firstp].keys())[1]:
# Gratuitous check
self.logger.warn("Auxillary time coordinate '{ac[TIME]}' != first COARDS"
"coordnate '{list(self.ds[firstp].keys())[1]}'")
# CF (nee COARDS) has tzyx coordinate ordering, time is at index [1] and depth is at [2]
# - times: Assume CF/COARDS, override if EPIC data detected
tindx = self.getTimeBegEndIndices(self.ds[list(self.ds[firstp].keys())[1]])
try:
times = self.ds[list(self.ds[firstp].maps.keys())[0]].data[tindx[0]:tindx[-1]:self.stride]
except ValueError as e:
# Likely 'not enough values to unpack' because of self.stride exceeding range
self.logger.warn(f"{e}. Stride value of {self.stride} is likely too high.")
self.logger.warn(f"Skipping all parameters in coor_group {ac}")
continue
time_units = self.ds[list(self.ds[firstp].maps.keys())[0]].units.lower()
if time_units == 'true julian day': # pragma: no cover
times, time_units = self._convert_EPIC_times(times, tindx)
time_units = time_units.replace('utc', 'UTC') # coards requires UTC in uppercase
if self.ds[list(self.ds[firstp].maps.keys())[0]].units == 'seconds since 1970-01-01T00:00:00Z':
time_units = 'seconds since 1970-01-01 00:00:00' # coards 1.0.4 and earlier doesn't like ISO format
mtimes = [from_udunits(mt, time_units) for mt in times]
# 1. - depths: first by CF/COARDS coordinate rules, then by EPIC conventions
nomDepths = None
nomLat = None
nomLon = None
try:
depths = self.ds[list(self.ds[firstp].maps.keys())[1]].data[:] # TODO lookup more precise depth from conversion from pressure
except IndexError:
self.logger.warn(f'Variable {firstp} has less than 2 coordinates: {self.ds[pname].keys()}')
depths = np.array([])
# If data aren't COARDS then index 2 will not be depths, but could be latitude, detect by testing length & auxCoords
if len(depths) == 1 and 'depth' not in ac:
try:
self.logger.info('Attempting to set nominal depth from EPIC Convention sensor_depth variable attribute')
depths = np.array([self.ds[firstp].attributes['sensor_depth']])
except KeyError:
self.logger.info('Variable %s does not have a sensor_depth attribute', firstp)
elif not depths.any():
self.logger.warn('Depth coordinate not found at index [2]. Looking for nominal position from EPIC Convention global attributes.')
try:
depths = np.array([float(self.ds.attributes['NC_GLOBAL']['nominal_instrument_depth'])])
nomLat = self.ds.attributes['NC_GLOBAL']['latitude']
nomLon = self.ds.attributes['NC_GLOBAL']['longitude']
except KeyError:
self.logger.warn('EPIC nominal position not found in global attributes. Assigning from variables (and maybe variable attribute).')
if 'depth' in self.ds:
if not hasattr(self.ds['depth'].data[0], '__iter__'):
depths = np.array([self.ds['depth'].data[0]])
if 'nominal_instrument_depth' in self.ds[firstp].attributes:
nomDepths = self.ds[firstp].attributes['nominal_instrument_depth']
if 'lat' in self.ds:
nomLat = self.ds['lat'].data[0][0]
if 'lon' in self.ds:
nomLon = self.ds['lon'].data[0][0]
if nomDepths and nomLat and nomLon:
pass
elif depths.any() and nomLat and nomLon:
self.logger.info('Nominal position assigned from EPIC Convention global attributes')
nomDepths = depths
elif depths.any():
self.logger.info('Nominal depth assigned from EPIC Convention variable attributes')
nomDepths = depths
nom_loc = self.getNominalLocation()
nomLat, nomLon = nom_loc[1][firstp], nom_loc[2][firstp]
else:
# Possible to have both precise and nominal locations with this approach
nom_loc = self.getNominalLocation()
nomDepths, nomLat, nomLon = nom_loc[0][firstp], nom_loc[1][firstp], nom_loc[2][firstp]
# Ensure that nomDepths is a numpy array
if not hasattr(nomDepths, '__iter__'):
nomDepths = np.array([nomDepths])
try:
_ = nomDepths.any()
except AttributeError:
nomDepths = np.array(nomDepths)
# 2 & 3. - latitudes & longitudes: first by CF/COARDS coordinate rules, then by EPIC conventions
shape_length = self.get_shape_length(firstp)
if shape_length == 4:
self.logger.info('%s has shape of 4, assume that singleton dimensions are used for nominal latitude and longitude', firstp)
# Would like all data set to have COARDS coordinate ordering, but they don't
# - http://dods.mbari.org/opendap/data/CCE_Archive/MS1/20151006/TU65m/MBCCE_MS1_TU65m_20151006.nc.html - has COARDS ordering
# - http://dods.mbari.org/opendap/data/CCE_Archive/MS2/20151005/ADCP300/MBCCE_MS2_ADCP300_20151005.nc - does not have COARDS ordering!
if ac['latitude'] in self.ds[ac['latitude']]:
# Precise GPS latitude positions
latitudes = self.ds[ac['latitude']][ac['latitude']].data[:]
else:
latitudes = float(self.ds[list(self.ds[firstp].maps.keys())[2]].data[0])
if ac['longitude'] in self.ds[ac['longitude']]:
# Precise GPS longitude positions
longitudes = self.ds[ac['longitude']][ac['longitude']].data[:]
else:
longitudes = float(self.ds[list(self.ds[firstp].maps.keys())[3]].data[0])
elif shape_length == 3 and 'EPIC' in self.ds.attributes['NC_GLOBAL']['Conventions'].upper(): # pragma: no cover
# Special fix for USGS EPIC ADCP variables missing depth coordinate, but having nominal sensor depth metadata
# - http://dods.mbari.org/opendap/data/CCE_Archive/MS1/20151006/ADCP300/MBCCE_MS1_ADCP300_20151006.nc - does not have COARDS ordering!
latitudes = float(self.ds[list(self.ds[firstp].maps.keys())[1]].data[0]) # TODO lookup more precise gps lat via coordinates pointing to a vector
longitudes = float(self.ds[list(self.ds[firstp].maps.keys())[2]].data[0]) # TODO lookup more precise gps lon via coordinates pointing to a vector
depths = nomDepths
elif shape_length == 2:
                        self.logger.info('%s has shape of 2, assuming no latitude and longitude singleton'
                                         ' dimensions. Using nominal location read from auxiliary coordinates', firstp)
longitudes = nomLon
latitudes = nomLat
elif shape_length == 1:
                        self.logger.info('%s has shape of 1, assuming no latitude, longitude, and'
                                         ' depth singleton dimensions. Using nominal location read'
                                         ' from auxiliary coordinates', firstp)
longitudes = nomLon
latitudes = nomLat
depths = nomDepths
else:
                        raise Exception('{} has shape of {}. Can handle only shapes of 1, 2, 3 (EPIC), and 4'.format(firstp, shape_length))
if hasattr(latitudes, '__iter__') and hasattr(longitudes, '__iter__'):
# We have precise gps positions, a location for each time value
points = []
for i, (lo, la) in enumerate(zip(longitudes, latitudes)):
if (lo == self.ds[ac['longitude']].attributes['_FillValue'] or
lo == self.ds[ac['longitude']].attributes['missing_value'] or
la == self.ds[ac['latitude']].attributes['_FillValue'] or
la == self.ds[ac['latitude']].attributes['missing_value']):
self.logger.debug(f"Not using missing or fill value at index {i}: lo, la = {lo}, {la}")
else:
points.append(Point(lo, la))
else:
if abs(latitudes) > 90:
# Brute-force fix for non-COARDS ordering, swap the coordinates
self.logger.info('%s appears to not have COARDS ordering of coordinate dimensions, swapping them', firstp)
tmp_var = latitudes
latitudes = longitudes
longitudes = tmp_var
self.logger.debug(f"Making points list from {(longitudes, latitudes)} for each {len(list(mtimes))} mtimes")
points = [Point(float(longitudes), float(latitudes)) for i in range(len(list(mtimes)))]
# Need a set of points for all the timeseriesprofile depths
points = points * len(list(depths))
ips = (InstantPoint(activity=self.activity, timevalue=mt) for mt in mtimes)
try:
self.logger.info(f'Calling bulk_create() for InstantPoints in ips generator for firstp = {firstp} with batch_size = {BATCH_SIZE}')
ips = InstantPoint.objects.using(self.dbAlias).bulk_create(ips, batch_size=BATCH_SIZE)
except (IntegrityError, psycopg2.IntegrityError) as e:
self.logger.info(f"Time axis '{ac[TIME]}' likely has timevalues already loaded from an axis in {time_axes_loaded}")
self.logger.info(f'Getting matching InstantPoints from the database, creating new ones not yet there.')
ips_new = []
num_created = 0
for ip in (InstantPoint(activity=self.activity, timevalue=mt) for mt in mtimes):
ip_db, created = InstantPoint.objects.using(self.dbAlias).get_or_create(
activity=self.activity, timevalue=ip.timevalue)
if created:
num_created += 1
ips_new.append(ip_db)
ips = ips_new
self.logger.info(f'Got {len(ips) - num_created} InstantPoints from the database, created {num_created} new ones')
if not ips:
                            self.logger.error(f'Unable to load InstantPoints for axis {ac[TIME]}. Exiting.')
self.logger.exception(f"Maybe you should delete Activity '{self.activity.name}' first?")
sys.exit(-1)
# TIME axes are commonly shared amongst variables on different grids in timeseriesprofile data
# Keep track of axis names for use in logger info messages
time_axes_loaded.add(ac[TIME])
if nomLon and nomLat:
nom_point = Point(float(nomLon), float(nomLat))
                    # Expect that nomDepths is a numpy array, even if it is single-valued
if nomDepths.any() and nom_point:
nls = []
for nd in nomDepths:
nl, _ = NominalLocation.objects.using(self.dbAlias).get_or_create(
depth=repr(nd), geom=nom_point, activity=self.activity)
nls.append(nl)
else:
nls = [None] * len(list(depths))
meass = []
for ip in ips:
for de, po, nl in zip(depths, points, nls):
if self.is_coordinate_bad(firstp, ip.timevalue, de):
self.logger.warn(f'Bad coordinate: {ip}, {de}')
meass.append(Measurement(depth=repr(de), geom=po, instantpoint=ip, nominallocation=nl))
try:
self.logger.info(f'Calling bulk_create() for {len(meass)} Measurements with batch_size = {BATCH_SIZE}')
meass = Measurement.objects.using(self.dbAlias).bulk_create(meass, batch_size=BATCH_SIZE)
except (IntegrityError, psycopg2.IntegrityError) as e:
self.logger.info(f"Depth axis '{ac[DEPTH]}' likely has depths already loaded from an axis in {depth_axes_loaded}")
self.logger.info(f'Getting matching Measurements from the database, creating new ones not yet there.')
meass_new = []
num_created = 0
for meas in meass:
meas_db, created = Measurement.objects.using(self.dbAlias).get_or_create(
instantpoint=meas.instantpoint, depth=meas.depth,
geom=meas.geom, nominallocation=meas.nominallocation)
if created:
num_created += 1
meass_new.append(meas_db)
meass = meass_new
self.logger.info(f'Got {len(meass) - num_created} Measurements from the database, created {num_created} new ones')
if not meass:
                            self.logger.error(f'Unable to load Measurements for axis {ac[DEPTH]}. Exiting.')
self.logger.exception(f"Maybe you should delete Activity '{self.activity.name}' first?")
sys.exit(-1)
# DEPTH axes are commonly shared amongst variables on different grids in timeseriesprofile data
# Keep track of axis names for use in logger info messages
if DEPTH in ac:
depth_axes_loaded.add(ac[DEPTH])
# End if i == 0 (loading coords for list of pnames)
constraint_string = f"using python slice: ds['{pname}']['{pname}'][{tindx[0]}:{tindx[-1]}:{self.stride}]"
try:
values = self.ds[pname][pname].data[tindx[0]:tindx[-1]:self.stride]
except ValueError as e:
# Likely 'not enough values to unpack' because of self.stride exceeding range
self.logger.warn(f"{e}. Stride value of {self.stride} is likely too high.")
self.logger.warn(f"Skipping all parameters in coor_group {ac}")
continue
if len(values.shape) == 1:
self.logger.info("len(values.shape) = 1; likely EPIC timeseries data - reshaping to add a 'depth' dimension")
values = values.reshape(values.shape[0], 1)
# Need to bulk_create() all values, set bad ones to None and remove them after insert
values = self._good_value_generator(pname, values.flatten())
mps = (MeasuredParameter(measurement=me, parameter=self.param_by_key[pname],
datavalue=va) for me, va in zip(meass, values))
                # meass is a concrete list (the other items are generators), so we can call len() on it
self.logger.info(f'Bulk loading {len(meass)} {self.param_by_key[pname]} datavalues into MeasuredParameter {constraint_string} with batch_size = {BATCH_SIZE}')
self.logger.info(f"Time data: {self.url}.ascii?{ac[TIME]}[{tindx[0]}:{self.stride}:{tindx[-1] - 1}]")
mps = MeasuredParameter.objects.using(self.dbAlias).bulk_create(mps, batch_size=BATCH_SIZE)
total_loaded += len(mps)
return total_loaded
def _measurement_with_instantpoint(self, ips, meass):
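        # Attach the saved InstantPoint objects (with ids assigned by bulk_create) to each
        # Measurement before the Measurements are themselves bulk created.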
for ip, meas in zip(ips, meass):
meas.instantpoint = ip
yield meas
def _bulk_load_coordinates(self, ips, meass, dup_times, ac, axes):
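        # The returned mask is True at indices whose InstantPoint or Measurement was not created
        # (bad coordinate or duplicate time); callers use it to drop the corresponding data values.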
self.logger.info(f'Calling bulk_create() for InstantPoints in ips generator')
# Create mask array in case any coordinate is None, so that we can know which MPs to bulk_create()
mask = []
ips_to_load = []
meas_to_load = []
for ip, meas, dt in zip(ips, meass, dup_times):
if not ip or not meas or dt:
mask.append(True)
else:
mask.append(False)
ips_to_load.append(ip)
meas_to_load.append(meas)
try:
self.ips = InstantPoint.objects.using(self.dbAlias).bulk_create(ips_to_load, batch_size=BATCH_SIZE)
except IntegrityError as e:
# Some data sets (e.g. Waveglider) share time coordinates with different depths
# Report the reuse of previous self.ips values
if hasattr(self, 'ips'):
self.logger.info(f"Duplicate time values for axes {axes}. Reusing previously loaded time values for {ac['time']}")
else:
self.logger.error(f"{e}")
self.logger.error(f"It's likely that the {ac['time']} variable in {self.url} has a duplicate value")
raise DuplicateData(f"Duplicate data from {self.url} in {self.dbAlias}")
meass = self._measurement_with_instantpoint(self.ips, meas_to_load)
self.logger.info(f'Calling bulk_create() for Measurements in meass generator with batch_size = {BATCH_SIZE}')
meass = Measurement.objects.using(self.dbAlias).bulk_create(meass, batch_size=BATCH_SIZE)
return meass, mask
def _measuredparameter_with_measurement(self, meass, mps):
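        # Attach the saved Measurement objects (with ids assigned by bulk_create) to each
        # MeasuredParameter before they are bulk created.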
for meas, mp in zip(meass, mps):
mp.measurement = meas
yield mp
def _delete_bad_datavalues(self, pname):
num, _ = (MeasuredParameter.objects.using(self.dbAlias)
.filter(parameter__name=pname, datavalue=np.nan).delete())
if num:
self.logger.info(f'Deleted {num} nan {pname} MeasuredParameters')
num, _ = (MeasuredParameter.objects.using(self.dbAlias)
.filter(parameter__name=pname, datavalue=np.inf).delete())
if num:
self.logger.info(f'Deleted {num} inf {pname} MeasuredParameters')
def _post_process_updates(self, mps_loaded, featureType='', add_to_activity=None):
#
        # Query database to build a path for trajectory or a stationPoint for timeSeriesProfile and timeSeries
#
stationPoint = None
path = None
if add_to_activity:
self.activity = add_to_activity
linestringPoints = Measurement.objects.using(self.dbAlias).filter(instantpoint__activity=self.activity
).order_by('instantpoint__timevalue').values_list('geom')
try:
path = LineString([p[0] for p in linestringPoints]).simplify(tolerance=.001)
except (TypeError, ValueError) as e:
# Likely "LineString requires at least 2 points, got 1."
self.logger.warn('%s', e)
self.logger.info('Leaving path set to None')
else:
if len(path) == 2:
self.logger.info("Length of path = 2: path = %s", path)
if path[0][0] == path[1][0] and path[0][1] == path[1][1]:
self.logger.info("And the 2 points are identical. Saving the first point of this"
" path as a point as the featureType is also %s.", featureType)
stationPoint = Point(path[0][0], path[0][1])
path = None
else:
# Use NominalLocation - for cases when we have precise GPS locations
lon = set([p.x for p in NominalLocation.objects.using(self.dbAlias)
.filter(activity=self.activity)
.values_list('geom', flat=True)])
lat = set([p.y for p in NominalLocation.objects.using(self.dbAlias)
.filter(activity=self.activity)
.values_list('geom', flat=True)])
if lon and lat:
if len(lon) != 1 or len(lat) != 1:
self.logger.error(f"For activity={self.activity} length of nominal latitudes and longitudes != 1")
else:
stationPoint = Point(lon.pop(), lat.pop())
# Add additional Parameters for all appropriate Measurements
self.logger.info("Adding SigmaT and Spiciness to the Measurements...")
self.addSigmaTandSpice(self.activity)
if self.grdTerrain:
self.logger.info("Adding altitude to the Measurements...")
try:
self.addAltitude(self.activity)
except FileNotFound as e:
self.logger.warn(str(e))
# Bulk loading of stoqs calculated values may introduce NaNs, remove them
for pname in (SIGMAT, SPICE, ALTITUDE):
self._delete_bad_datavalues(pname)
# Update the Activity with information we now have following the load
try:
varList = ', '.join(set(list(self.ds.keys())) & set(self.varsLoaded))
except AttributeError:
# ROVCTDloader creates self.vSeen dictionary with counts of each parameter
varList = ', '.join(list(self.vSeen.keys()))
# Construct a meaningful comment that looks good in the UI Metadata->NetCDF area
if hasattr(self, 'add_to_activity'):
act_to_update = Activity.objects.using(self.dbAlias).get(id=self.add_to_activity.id)
load_comment = f"{act_to_update.comment} - Loaded variables {varList} from {self.url}"
load_comment += f" (added to Activity {self.add_to_activity.name})"
elif hasattr(self, 'associatedActivityName'):
act_to_update = Activity.objects.using(self.dbAlias).get(name=self.associatedActivityName)
load_comment = f"{act_to_update.comment} - Loaded variables {varList} from {self.url}"
load_comment += f" (added to Activity {self.associatedActivityName})"
else:
act_to_update = Activity.objects.using(self.dbAlias).get(id=self.activity.id)
load_comment = f"Loaded variables {varList} from {self.url}"
if hasattr(self, 'requested_startDatetime') and hasattr(self, 'requested_endDatetime'):
if self.requested_startDatetime and self.requested_endDatetime:
load_comment += f" between {self.requested_startDatetime} and {self.requested_endDatetime}"
load_comment += f" with a stride of {self.stride} on {str(datetime.utcnow()).split('.')[0]}Z "
self.logger.debug("Updating its comment with load_comment = %s", load_comment)
if hasattr(self, 'add_to_activity') or hasattr(self, 'associatedActivityName'):
num_updated = Activity.objects.using(self.dbAlias).filter(id=act_to_update.id).update(
comment=load_comment,
num_measuredparameters=mps_loaded + act_to_update.num_measuredparameters)
else:
num_updated = Activity.objects.using(self.dbAlias).filter(id=act_to_update.id).update(
name=self.getActivityName(),
comment=load_comment,
maptrack=path,
mappoint=stationPoint,
num_measuredparameters=mps_loaded,
loaded_date=datetime.utcnow())
self.logger.debug("%d activitie(s) updated with new attributes.", num_updated)
#
# Add resources after loading data to capture additional metadata that may be added
#
try:
self.addResources()
except IntegrityError as e:
self.logger.error('Failed to properly addResources: %s', e)
#
# Update the stats and store simple line values
#
self.updateActivityMinMaxDepth(act_to_update)
self.updateActivityParameterStats(act_to_update)
self.updateCampaignStartEnd()
self.assignParameterGroup(groupName=MEASUREDINSITU)
if featureType == TRAJECTORY:
if hasattr(self, 'critSimpleDepthTime'):
# Loader may have this attribute set, e.g. for BED that need less simplification
self.insertSimpleDepthTimeSeries(critSimpleDepthTime=self.critSimpleDepthTime)
else:
self.insertSimpleDepthTimeSeries()
self.saveBottomDepth()
self.insertSimpleBottomDepthTimeSeries()
elif featureType == TIMESERIES or featureType == TIMESERIESPROFILE:
self.insertSimpleDepthTimeSeriesByNominalDepth()
elif featureType == TRAJECTORYPROFILE:
self.insertSimpleDepthTimeSeriesByNominalDepth(trajectoryProfileDepths=self.timeDepthProfiles)
self.logger.info("Data load complete, %d records loaded.", mps_loaded)
return path
def process_trajectory_values_from_generator(self, data_generator):
'''Use original method to load a MeasuredParameter datavalue a value
at a time into the database. Works only for featureType='trajectory'.
'''
self.initDB()
path = None
last_key = None
self.param_by_key = {}
self.parameter_counts = defaultdict(lambda: 0)
featureType='trajectory'
mps_loaded = 0
for row in data_generator():
row = self.preProcessParams(row)
(longitude, latitude, mtime, depth) = (
row.pop('longitude'),
row.pop('latitude'),
from_udunits(row.pop('time'), row.pop('timeUnits')),
row.pop('depth'))
key, value = list(row.items()).pop()
value = float(value)
if key != last_key:
logger.info(f'Loading values for Parameter {key}')
last_key = key
point = Point(longitude, latitude)
self.param_by_key[key] = self.getParameterByName(key)
self.parameter_counts[self.param_by_key[key]] += 1
ip,_ = InstantPoint.objects.using(self.dbAlias).get_or_create(
activity=self.activity, timevalue=mtime)
meas,_ = Measurement.objects.using(self.dbAlias).get_or_create(
instantpoint=ip, geom=point, depth=depth)
mp = MeasuredParameter(measurement=meas,
parameter=self.param_by_key[key], datavalue=value)
mp.save(using=self.dbAlias)
mps_loaded += 1
self.totalRecords = self.getTotalRecords()
path = self._post_process_updates(mps_loaded, featureType)
return mps_loaded, path, self.parameter_counts
def process_data(self, featureType='', add_to_activity=None):
'''Bulk copy measurement data into database
'''
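        # Flow (sketch): resolve coordinate dicts for the include_names, initialize the Activity in
        # the database, dispatch to the featureType-specific bulk loader, then remove None values
        # and empty Parameters before running _post_process_updates().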
self.coord_dicts = {}
for v in self.include_names:
try:
self.coord_dicts[v] = self.getAuxCoordinates(v)
except ParameterNotFound as e:
self.logger.debug(str(e))
except VariableHasBadCoordinatesAttribute as e:
self.logger.error(str(e))
self.initDB()
path = None
parmCount = {}
self.parameter_counts = {}
for key in self.include_names:
parmCount[key] = 0
if getattr(self, 'command_line_args', False):
if self.command_line_args.append:
self.dataStartDatetime = (InstantPoint.objects.using(self.dbAlias)
.filter(activity__name=self.getActivityName())
.aggregate(Max('timevalue'))['timevalue__max'])
self.param_by_key = {}
self.mv_by_key = {}
self.fv_by_key = {}
for key in (set(self.include_names) & set(self.ds.keys())):
parameter_name, _ = self.parameter_name(key)
self.param_by_key[key] = self.getParameterByName(parameter_name)
self.parameter_counts[self.param_by_key[key]] = 0
for key in self.ds.keys():
self.mv_by_key[key] = self.getmissing_value(key)
self.fv_by_key[key] = self.get_FillValue(key)
self.logger.info("From: %s", self.url)
if featureType:
featureType = featureType.lower()
else:
featureType = self.getFeatureType()
mps_loaded = 0
try:
            if featureType == TRAJECTORY:
mps_loaded = self.load_trajectory(add_to_activity=add_to_activity)
elif featureType == TIMESERIES:
mps_loaded = self.load_timeseriesprofile()
elif featureType == TIMESERIESPROFILE:
mps_loaded = self.load_timeseriesprofile()
elif featureType == TRAJECTORYPROFILE:
self.logger.warn(f"Loader for featureType {featureType} has not yet been implemented")
else:
raise Exception(f"Global attribute 'featureType' is not one of '{TRAJECTORY}',"
" '{TIMESERIES}', or '{TIMESERIESPROFILE}' - see:"
" http://cf-pcmdi.llnl.gov/documents/cf-conventions/1.6/ch09.html")
self.totalRecords = mps_loaded
except (IntegrityError, DuplicateData) as e:
# Likely duplicate key value violates unique constraint "stoqs_measuredparameter_measurement_id_parameter_1328c3fb_uniq"
# Can't append data from source with bulk_create(), give appropriate warning
self.logger.exception(str(e))
self.logger.error(f'Failed to bulk_create() data from URL: {self.url}')
self.logger.error(f'If you need to load data that has been appended to the URL then delete its Activity before loading.')
return mps_loaded, path, parmCount
except KeyError as e:
# Likely an include_name variable has a bad coordinates attribute, give a better error message than just KeyError
self.logger.exception(str(e))
self.logger.error(f'Failed to bulk_create() data from URL: {self.url}')
return mps_loaded, path, parmCount
if mps_loaded:
# Bulk loading may introduce None values, remove them
MeasuredParameter.objects.using(self.dbAlias).filter(datavalue=None, dataarray=None).delete()
# Removing Nones above may leave a Parameter without any MeasuredParameters, remove them
for parameter in self.parameter_counts.copy().keys():
mp_count = MeasuredParameter.objects.using(self.dbAlias).filter(parameter=parameter).count()
self.logger.info(f"{parameter.name:40} count: {mp_count:6}")
if mp_count == 0:
self.logger.info(f"Deleting Parameter because it has no valid data: {parameter}")
try:
del parmCount[parameter.name.split(' ')[0]]
del self.parameter_counts[parameter]
del self.parameter_dict[parameter.name]
except KeyError as e:
self.logger.warning(f"{e} not from Activity {self.activity}")
parameter.delete(using=self.dbAlias)
else:
parmCount[parameter.name.split(' ')[0]] = mp_count
path = self._post_process_updates(mps_loaded, featureType, add_to_activity=add_to_activity)
return mps_loaded, path, parmCount
class Trajectory_Loader(Base_Loader):
'''
Generic loader for trajectory data. May be subclassed if special data or metadata processing
is needed for a particular kind of trajectory data.
'''
include_names = ['temperature', 'conductivity']
def preProcessParams(self, row):
'''
Compute on-the-fly any additional parameters for loading into the database
'''
# Compute salinity if it's not in the record and we have temperature, conductivity, and pressure
##if row.has_key('temperature') and row.has_key('pressure') and row.has_key('latitude'):
## conductivity_ratio = row['conductivity'] /
## row['salinity'] = sw.salt(conductivity_ratio, sw.T90conv(row['temperature']), row['pressure'])
# TODO: Compute sigma-t if we have standard_names of sea_water_salinity, sea_water_temperature and sea_water_pressure
# TODO: Lookup bottom depth here and create new bottom depth and altitude parameters...
return super(Trajectory_Loader, self).preProcessParams(row)
class Dorado_Loader(Trajectory_Loader):
'''
MBARI Dorado data as read from the production archive. This class includes overriden methods
to load quick-look plot and other Resources into the STOQS database.
'''
def addResources(self):
'''
In addition to the NC_GLOBAL attributes that are added in the base class also add the quick-look plots that are on the dods server.
'''
if not self.url.endswith('_decim.nc'):
return super(Dorado_Loader, self).addResources()
baseUrl = 'http://dods.mbari.org/data/auvctd/surveys'
survey = self.url.split('/')[-1].split('.nc')[0].split('_decim')[0] # Works for both .nc and _decim.nc files
yyyy = int(survey.split('_')[1])
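        # e.g. (illustrative) a url ending in Dorado389_2010_300_00_300_00_decim.nc yields
        # survey = 'Dorado389_2010_300_00_300_00' and yyyy = 2010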
# Quick-look plots
self.logger.debug("Getting or Creating ResourceType quick_look...")
resourceType, _ = ResourceType.objects.db_manager(self.dbAlias).get_or_create(
name='quick_look', description='Quick Look plot of data from this AUV survey')
for ql in ['2column', 'biolume', 'hist_stats', 'lopc', 'nav_adjust', 'prof_stats']:
url = '%s/%4d/images/%s_%s.png' % (baseUrl, yyyy, survey, ql)
self.logger.debug("Getting or Creating Resource with name = %s, url = %s", ql, url )
resource, _ = Resource.objects.db_manager(self.dbAlias).get_or_create(
name=ql, uristring=url, resourcetype=resourceType)
ActivityResource.objects.db_manager(self.dbAlias).get_or_create(
activity=self.activity,
resource=resource)
# kml, odv, mat
kmlResourceType, _ = ResourceType.objects.db_manager(self.dbAlias).get_or_create(
name = 'kml', description='Keyhole Markup Language file of data from this AUV survey')
odvResourceType, _ = ResourceType.objects.db_manager(self.dbAlias).get_or_create(
name = 'odv', description='Ocean Data View spreadsheet text file')
matResourceType, _ = ResourceType.objects.db_manager(self.dbAlias).get_or_create(
name = 'mat', description='Matlab data file')
for res in ['kml', 'odv', 'odvGulper', 'mat', 'mat_gridded']:
if res == 'kml':
url = '%s/%4d/kml/%s.kml' % (baseUrl, yyyy, survey)
rt = kmlResourceType
elif res == 'odv':
url = '%s/%4d/odv/%s.txt' % (baseUrl, yyyy, survey)
rt = odvResourceType
elif res == 'odvGulper':
url = '%s/%4d/odv/%s_Gulper.txt' % (baseUrl, yyyy, survey)
rt = odvResourceType
elif res == 'mat':
url = '%s/%4d/mat/%s.mat' % (baseUrl, yyyy, survey)
rt = matResourceType
elif res == 'mat_gridded':
url = '%s/%4d/mat/%s_gridded.mat' % (baseUrl, yyyy, survey)
rt = matResourceType
else:
self.logger.warn('No handler for res = %s', res)
self.logger.debug("Getting or Creating Resource with name = %s, url = %s", res, url )
resource, _ = Resource.objects.db_manager(self.dbAlias).get_or_create(
name=res, uristring=url, resourcetype=rt)
ActivityResource.objects.db_manager(self.dbAlias).get_or_create(
activity=self.activity, resource=resource)
return super(Dorado_Loader, self).addResources()
class Lrauv_Loader(Trajectory_Loader):
'''
MBARI Long Range AUV data loader.
'''
include_names = [
'mass_concentration_of_oxygen_in_sea_water',
'mole_concentration_of_nitrate_in_sea_water',
'mass_concentration_of_chlorophyll_in_sea_water',
'sea_water_salinity',
'sea_water_temperature',
]
def __init__(self, contourUrl, timezone, critSimpleDepthTime, *args, **kwargs):
self.contourUrl = contourUrl
self.timezone = timezone
self.critSimpleDepthTime = critSimpleDepthTime
super(Lrauv_Loader, self).__init__(*args, **kwargs)
def addResources(self):
'''
In addition to the NC_GLOBAL attributes that are added in the base class also add the quick-look plots that are on the dods server.
'''
if self.contourUrl and self.timezone: # pragma: no cover
# Replace netCDF file with png extension
outurl = re.sub('\.nc$','.png', self.url)
# Contour plots
self.logger.debug("Getting or Creating ResourceType quick_look...")
resourceType, _ = ResourceType.objects.db_manager(self.dbAlias).get_or_create(
name = 'quick_look', description='Quick Look plot of data from this AUV survey')
self.logger.debug("Getting or Creating Resource with name = log, url = %s", outurl)
resource, _ = Resource.objects.db_manager(self.dbAlias).get_or_create(
name='log', uristring=outurl, resourcetype=resourceType)
ActivityResource.objects.db_manager(self.dbAlias).get_or_create(
activity=self.activity,
resource=resource)
startDateTimeUTC = pytz.utc.localize(self.startDatetime)
startDateTimeLocal = startDateTimeUTC.astimezone(pytz.timezone(self.timezone))
startDateTimeLocal = startDateTimeLocal.replace(hour=0,minute=0,second=0,microsecond=0)
startDateTimeUTC = startDateTimeLocal.astimezone(pytz.utc)
endDateTimeUTC = pytz.utc.localize(self.startDatetime)
endDateTimeLocal = endDateTimeUTC.astimezone(pytz.timezone(self.timezone))
endDateTimeLocal = endDateTimeLocal.replace(hour=23,minute=59,second=0,microsecond=0)
endDateTimeUTC = endDateTimeLocal.astimezone(pytz.utc)
outurl = self.contourUrl + self.platformName + '_log_' + startDateTimeUTC.strftime(
'%Y%m%dT%H%M%S') + '_' + endDateTimeUTC.strftime('%Y%m%dT%H%M%S') + '.png'
self.logger.debug("Getting or Creating Resource with name = 24hr, url = %s", outurl)
resource, _ = Resource.objects.db_manager(self.dbAlias).get_or_create(
name='24hr', uristring=outurl, resourcetype=resourceType)
ActivityResource.objects.db_manager(self.dbAlias).get_or_create(
activity=self.activity,
resource=resource)
return super(Lrauv_Loader, self).addResources()
class Glider_Loader(Trajectory_Loader):
'''
CenCOOS Line 66 Spray glider data loader
'''
include_names=['TEMP', 'PSAL', 'OPBS', 'FLU2']
def preProcessParams(self, row):
'''
Placeholder for any special preprocessing for Glider data
'''
return super(Glider_Loader,self).preProcessParams(row)
class TimeSeries_Loader(Base_Loader):
'''
Generic loader for station (non-trajectory) data. Expects CF-1.6 timeSeries discrete sampling geometry featureType.
'''
# Subclasses or calling function must specify include_names
include_names=[]
def preProcessParams(self, row):
'''
Placeholder for any special preprocessing, for example adding sigma-t or other derived parameters
'''
return super(TimeSeries_Loader,self).preProcessParams(row)
class Mooring_Loader(TimeSeries_Loader):
'''
OceanSITES formatted Mooring data loader. Expects CF-1.6 timeSeriesProfile discrete sampling geometry type.
'''
include_names=['Temperature', 'Salinity', 'TEMP', 'PSAL', 'ATMP', 'AIRT', 'WDIR', 'WSDP']
def preProcessParams(self, row):
'''
Placeholder for any special preprocessing for Mooring data
'''
return super(Mooring_Loader,self).preProcessParams(row)
class BED_TS_Loader(TimeSeries_Loader):
'''
Benthic Event Detector timeSeries data. Expects CF-1.6 timeSeries discrete sampling geometry type.
'''
include_names = ['XA', 'YA', 'ZA', 'XR', 'YR', 'ZR', 'PRESS', 'BED_DEPTH']
def preProcessParams(self, row):
'''
        Placeholder for any special preprocessing for BED data
'''
return super(BED_TS_Loader, self).preProcessParams(row)
class BED_Trajectory_Loader(Trajectory_Loader):
'''
    Benthic Event Detector trajectory data. Expects CF-1.6 trajectory discrete sampling geometry type.
'''
include_names = ['XA', 'YA', 'ZA', 'A', 'XR', 'YR', 'ZR', 'ROTRATE', 'ROTCOUNT', 'P', 'P_ADJUSTED', 'DEPTH']
def __init__(self, framegrab, critSimpleDepthTime, *args, **kwargs):
self.framegrab = framegrab
self.critSimpleDepthTime = critSimpleDepthTime
super(BED_Trajectory_Loader, self).__init__(*args, **kwargs)
def addResources(self): # pragma: no cover
'''
In addition to the NC_GLOBAL attributes that are added in the base class also add the frame grab URL
'''
self.logger.debug("Getting or Creating ResourceType framegrab...")
resourceType, _ = ResourceType.objects.using(self.dbAlias).get_or_create(
name='quick_look', description='Video framegrab of BED located on sea floor')
self.logger.debug("Getting or Creating Resource with framegrab = self.framegrab")
link_text = 'framegrab'
if self.framegrab.endswith('.m4v') or self.framegrab.endswith('.mov'):
link_text = 'video'
resource, _ = Resource.objects.using(self.dbAlias).get_or_create(
name=link_text, uristring=self.framegrab, resourcetype=resourceType)
ActivityResource.objects.using(self.dbAlias).get_or_create(
activity=self.activity, resource=resource)
return super(BED_Trajectory_Loader, self).addResources()
#
# Helper methods that expose a common interface for executing the loaders for specific platforms
#
def runTrajectoryLoader(url, cName, cDesc, aName, pName, pColor, pTypeName, aTypeName, parmList, dbAlias,
stride, plotTimeSeriesDepth=None, grdTerrain=None, command_line_args=None):
'''
Run the DAPloader for Generic AUVCTD trajectory data and update the Activity with
attributes resulting from the load into dbAlias. Designed to be called from script
that loads the data. Following the load important updates are made to the database.
    If a number value is given to plotTimeSeriesDepth then that Resource is added for each
    Parameter loaded; this gives instruction to the STOQS UI to also plot timeSeries data
in the Parameter tab.
'''
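    # Typical call from a load script (illustrative values only):
    #   runTrajectoryLoader('http://dods.mbari.org/opendap/data/auvctd/surveys/2010/netcdf/Dorado389_2010_300_00_300_00_decim.nc',
    #                       'CANON 2010', 'CANON September 2010 campaign', 'Dorado389_2010_300_00 (stride=10)',
    #                       'dorado', 'ffeda0', 'auv', 'AUV mission',
    #                       ['temperature', 'salinity'], 'stoqs_september2010', 10)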
loader = Trajectory_Loader(
url = url,
campaignName = cName,
campaignDescription = cDesc,
dbAlias = dbAlias,
activityName = aName,
activitytypeName = aTypeName,
platformName = pName,
platformColor = pColor,
platformTypeName = pTypeName,
stride = stride,
grdTerrain = grdTerrain,
command_line_args = command_line_args)
loader.include_names = parmList
# Fix up legacy data files
if loader.auxCoords is None:
loader.auxCoords = {}
if aName.find('_jhmudas_v1') != -1:
for p in loader.include_names:
loader.auxCoords[p] = {'time': 'time', 'latitude': 'latitude', 'longitude': 'longitude', 'depth': 'depth'}
if plotTimeSeriesDepth is not None:
# Used first for BEDS where we want both trajectory and timeSeries plots
loader.plotTimeSeriesDepth = dict.fromkeys(parmList + [ALTITUDE, SIGMAT, SPICE], plotTimeSeriesDepth)
loader.process_data()
loader.logger.debug("Loaded Activity with name = %s", aName)
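# A minimal usage sketch for runTrajectoryLoader (the URL, campaign, platform and
# activity names below are illustrative placeholders, not a real data set); see
# loaders/CANON/__init__.py for real examples of this calling pattern:
#
# runTrajectoryLoader('http://example.org/thredds/dodsC/auv/survey_decim.nc',
#                     'Example Campaign', 'Example campaign description',
#                     'Example Activity', 'example_auv', 'ffeda0', 'auv',
#                     'AUV mission', ['temperature', 'salinity'], 'default',
#                     stride=10, plotTimeSeriesDepth=0)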
def runBEDTrajectoryLoader(url, cName, cDesc, aName, pName, pColor, pTypeName, aTypeName,
parmList, dbAlias, stride, plotTimeSeriesDepth=None,
grdTerrain=None, framegrab=None, critSimpleDepthTime=1): # pragma: no cover
'''
Run the DAPloader for Benthic Event Detector trajectory data and update the Activity with
attributes resulting from the load into dbAlias. Designed to be called from a script
that loads the data. Following the load, important updates are made to the database.
If a number value is given for plotTimeSeriesDepth then that Resource is added for each
Parameter loaded; this instructs the STOQS UI to also plot timeSeries data
in the Parameter tab.
'''
loader = BED_Trajectory_Loader(
url = url,
campaignName = cName,
campaignDescription = cDesc,
dbAlias = dbAlias,
activityName = aName,
activitytypeName = aTypeName,
platformName = pName,
platformColor = pColor,
platformTypeName = pTypeName,
stride = stride,
grdTerrain = grdTerrain,
framegrab = framegrab,
critSimpleDepthTime = critSimpleDepthTime)
loader.include_names = parmList
if plotTimeSeriesDepth:
# Used first for BEDS where we want both trajectory and timeSeries plots - assumes starting depth of BED
loader.plotTimeSeriesDepth = dict.fromkeys(parmList + ['altitude'], plotTimeSeriesDepth)
loader.process_data()
loader.logger.debug("Loaded Activity with name = %s", aName)
def _loadLOPC(url, stride, loader, cName, cDesc, dbAlias, aTypeName, pName,
pColor, pTypeName, grdTerrain, plotTimeSeriesDepth):
# Construct LOPC data url that looks like:
# http://dods.mbari.org/opendap/data/ssdsdata/ssds/generated/netcdf/files/ssds.shore.mbari.org/auvctd/missionlogs/2010/2010300/2010.300.00/lopc.nc
# from url that looks like: http://dods.mbari.org/opendap/data/auvctd/surveys/2010/netcdf/Dorado389_2010_300_00_300_00_decim.nc
# or like: http://odss.mbari.org/thredds/dodsC/CANON_march2013/dorado/Dorado389_2013_074_02_074_02_decim.nc
# TODO: Handle multiple missions that compose a survey
survey = url[url.find('Dorado389'):]
yr = survey.split('_')[1]
yd = survey.split('_')[2]
mn = survey.split('_')[3]
lopc_url = ('http://dods.mbari.org/opendap/data/ssdsdata/ssds/generated/netcdf/'
'files/ssds.shore.mbari.org/auvctd/missionlogs/{}/{}/{}.{}.{}/'
'lopc.nc').format(yr, yr + yd, yr, yd, mn)
lopc_aName = '{} (stride={})'.format(lopc_url, stride)
loader.logger.debug("Instantiating Dorado_Loader for url = %s", lopc_url)
try:
# As we use the Measurements from the original Activity, associate the LOPC
# MeasuredParameters with it as well so that we can compare them in the UI
lopc_loader = Dorado_Loader(url = lopc_url, campaignName = cName,
campaignDescription = cDesc, dbAlias = dbAlias,
activityName = loader.activity.name,
activitytypeName = loader.activity.activitytype.name,
platformName = pName, platformColor = pColor,
platformTypeName = pTypeName, stride = stride,
grdTerrain = grdTerrain)
except Exception:
# Fail somewhat silently
loader.logger.warn('No LOPC data to load at %s', lopc_url)
return
lopc_loader.include_names = ['sepCountList', 'mepCountList']
if plotTimeSeriesDepth is not None:
lopc_loader.plotTimeSeriesDepth = dict.fromkeys(lopc_loader.include_names, plotTimeSeriesDepth)
# These get added to ignored_names on previous .process_data(), remove them
if 'sepCountList' in lopc_loader.ignored_names:
lopc_loader.ignored_names.remove('sepCountList')
if 'mepCountList' in lopc_loader.ignored_names:
lopc_loader.ignored_names.remove('mepCountList')
lopc_loader.associatedActivityName = loader.activityName
# Auxiliary coordinates are the same for all include_names
lopc_loader.auxCoords = {}
for v in lopc_loader.include_names:
lopc_loader.auxCoords[v] = {'time': 'time', 'latitude': 'latitude', 'longitude': 'longitude', 'depth': 'depth'}
Dorado_Loader.getFeatureType = lambda self: TRAJECTORY
try:
# Specify featureType so that non-CF LOPC data can be loaded
lopc_loader.process_data(featureType=TRAJECTORY)
except VariableMissingCoordinatesAttribute as e:
loader.logger.exception(str(e))
except NoValidData as e:
loader.logger.warn(str(e))
except KeyError as e:
loader.logger.warn(str(e))
else:
loader.logger.debug("Loaded Activity with name = %s", lopc_loader.activityName)
def _load_plankton_proxies(url, stride, loader, cName, cDesc, dbAlias, aTypeName, pName,
pColor, pTypeName, grdTerrain, plotTimeSeriesDepth):
survey = url[url.find('Dorado389'):]
yr = survey.split('_')[1]
yd = survey.split('_')[2]
mn = survey.split('_')[3]
# http://odss.mbari.org/thredds/dodsC/Other/routine/Products/Dorado/netcdf_proxies/2003/Dorado_2003_340_02_proxies.nc
pp_url = ('http://odss.mbari.org/thredds/dodsC/Other/routine/Products/Dorado/'
'netcdf_proxies/{}/Dorado_{}_{}_{}_proxies.nc').format(yr, yr, yd, mn)
pp_aName = '{} (stride={})'.format(pp_url, stride)
loader.logger.debug("Instantiating Trajectory_Loader for url = %s", pp_url)
try:
pp_loader = Trajectory_Loader(url = pp_url, campaignName = cName,
campaignDescription = cDesc, dbAlias = dbAlias,
activityName = pp_aName, activitytypeName = aTypeName,
platformName = pName, platformColor = pColor,
platformTypeName = pTypeName, stride = stride,
grdTerrain = grdTerrain)
except Exception:
loader.logger.warn('No plankton proxy data to load at %s', pp_url)
return
pp_loader.include_names = ['adinos', 'bg_biolum', 'diatoms', 'fluo', 'hdinos', 'intflash', 'nbflash_high', 'nbflash_low', 'profile']
if plotTimeSeriesDepth is not None:
pp_loader.plotTimeSeriesDepth = dict.fromkeys(pp_loader.include_names, plotTimeSeriesDepth)
# Auxiliary coordinates are the same for all include_names
pp_loader.auxCoords = {}
for v in pp_loader.include_names:
pp_loader.auxCoords[v] = {'time': 'time', 'latitude': 'latitude', 'longitude': 'longitude', 'depth': 'depth'}
Trajectory_Loader.getFeatureType = lambda self: TRAJECTORY
try:
# Specify featureType so that non-CF plankton proxy data can be loaded
pp_loader.add_to_activity=loader.activity
pp_loader.process_data(featureType=TRAJECTORY, add_to_activity=loader.activity)
except VariableMissingCoordinatesAttribute as e:
loader.logger.exception(str(e))
except NoValidData as e:
loader.logger.warn(str(e))
except KeyError as e:
loader.logger.warn(str(e))
else:
loader.logger.debug("Loaded Activity with name = %s", pp_loader.activityName)
def runDoradoLoader(url, cName, cDesc, aName, pName, pColor, pTypeName, aTypeName, parmList,
dbAlias, stride, grdTerrain=None, plotTimeSeriesDepth=None, plankton_proxies=False):
'''
Run the DAPloader for Dorado AUVCTD trajectory data and update the Activity with
attributes resulting from the load into dbAlias. Designed to be called from a script
that loads the data. Following the load, important updates are made to the database.
'''
loader = Dorado_Loader(
url = url,
campaignName = cName,
campaignDescription = cDesc,
dbAlias = dbAlias,
activityName = aName,
activitytypeName = aTypeName,
platformName = pName,
platformColor = pColor,
platformTypeName = pTypeName,
stride = stride,
grdTerrain = grdTerrain)
if parmList:
loader.include_names = parmList
# Auxiliary coordinates are the same for all include_names
loader.auxCoords = {}
for v in loader.include_names:
loader.auxCoords[v] = {'time': 'time', 'latitude': 'latitude', 'longitude': 'longitude', 'depth': 'depth'}
if plotTimeSeriesDepth is not None:
# Useful in some situations to have simple time series display of Dorado data
loader.plotTimeSeriesDepth = dict.fromkeys(parmList + [ALTITUDE, SIGMAT, SPICE], plotTimeSeriesDepth)
mps_loaded = 0
try:
mps_loaded, _, _ = loader.process_data()
except VariableMissingCoordinatesAttribute as e:
loader.logger.exception(str(e))
loader.logger.info(f"Loaded Activity {aName} with {mps_loaded} MeasuredParameters")
if mps_loaded:
if 'sepCountList' in loader.include_names or 'mepCountList' in loader.include_names:
_loadLOPC(url, stride, loader, cName, cDesc, dbAlias, aTypeName, pName, pColor, pTypeName, grdTerrain, plotTimeSeriesDepth)
if plankton_proxies:
_load_plankton_proxies(url, stride, loader, cName, cDesc, dbAlias, aTypeName, pName, pColor, pTypeName, grdTerrain, plotTimeSeriesDepth)
else:
loader.logger.warn(f"Did not load any MeasuredParameters from {loader.url}")
return mps_loaded
def runLrauvLoader(url, cName, cDesc, aName, pName, pColor, pTypeName, aTypeName, parmList, dbAlias,
stride=1, startDatetime=None, endDatetime=None, grdTerrain=None,
dataStartDatetime=None, contourUrl=None, auxCoords=None, timezone='America/Los_Angeles',
command_line_args=None, plotTimeSeriesDepth=None, critSimpleDepthTime=10): # pragma: no cover
'''
Run the DAPloader for Long Range AUVCTD trajectory data and update the Activity with
attributes resulting from the load into dbAlias. Designed to be called from a script
that loads the data. Following the load, important updates are made to the database.
'''
loader = Lrauv_Loader(
url = url,
campaignName = cName,
campaignDescription = cDesc,
dbAlias = dbAlias,
activityName = aName,
activitytypeName = aTypeName,
platformName = pName,
platformColor = pColor,
platformTypeName = pTypeName,
stride = stride,
startDatetime = startDatetime,
endDatetime = endDatetime,
dataStartDatetime = dataStartDatetime,
grdTerrain = grdTerrain,
contourUrl = contourUrl,
auxCoords = auxCoords,
timezone = timezone,
command_line_args = command_line_args,
critSimpleDepthTime = critSimpleDepthTime)
if parmList:
loader.include_names = []
for p in parmList:
if p.find('.') == -1:
loader.include_names.append(p)
else:
loader.logger.warn('Parameter %s not included. CANNOT HAVE PARAMETER NAMES WITH PERIODS. Period.', p)
# Auxiliary coordinates are generally the same for all include_names
if auxCoords is None:
loader.auxCoords = {}
if url.endswith('shore.nc'):
for p in loader.include_names:
loader.auxCoords[p] = {'time': 'Time', 'latitude': 'latitude', 'longitude': 'longitude', 'depth': 'depth'}
else:
for p in loader.include_names:
loader.auxCoords[p] = {'time': 'time', 'latitude': 'latitude', 'longitude': 'longitude', 'depth': 'depth'}
if plotTimeSeriesDepth is not None:
# Useful for plotting engineering data as time series for LRAUVs
loader.plotTimeSeriesDepth = dict.fromkeys(parmList + [ALTITUDE, SIGMAT, SPICE], plotTimeSeriesDepth)
try:
loader.process_data()
except NoValidData as e:
loader.logger.warn(str(e))
raise
else:
loader.logger.debug("Loaded Activity with name = %s", aName)
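# A minimal usage sketch for runLrauvLoader (the URL and names are hypothetical
# placeholders). Parameter names containing periods are skipped by the loop above,
# so pass plain names such as 'temperature':
#
# runLrauvLoader('http://example.org/thredds/dodsC/lrauv/vehicle_shore.nc',
#                'Example Campaign', 'Example campaign description',
#                'Example Activity', 'example_lrauv', 'ffeda0', 'auv',
#                'LRAUV mission', ['temperature', 'salinity', 'chlorophyll'],
#                'default', stride=10, plotTimeSeriesDepth=0)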
def runGliderLoader(url, cName, cDesc, aName, pName, pColor, pTypeName, aTypeName, parmList,
dbAlias, stride, startDatetime=None, endDatetime=None, grdTerrain=None,
dataStartDatetime=None, plotTimeSeriesDepth=None, command_line_args=None): # pragma: no cover
'''
Run the DAPloader for Spray Glider trajectory data and update the Activity with
attributes resulting from the load into dbAlias. Designed to be called from a script
that loads the data. Following the load, important updates are made to the database.
'''
loader = Glider_Loader(
url = url,
campaignName = cName,
campaignDescription = cDesc,
dbAlias = dbAlias,
activityName = aName,
activitytypeName = aTypeName,
platformName = pName,
platformColor = pColor,
platformTypeName = pTypeName,
stride = stride,
startDatetime = startDatetime,
endDatetime = endDatetime,
dataStartDatetime = dataStartDatetime,
grdTerrain = grdTerrain,
command_line_args = command_line_args)
if parmList:
loader.logger.debug("Setting include_names to %s", parmList)
loader.include_names = parmList
# Auxiliary coordinates are the same for all include_names
# NOTE: Presence of coordinates variable attribute will override these assignments
loader.auxCoords = {}
if pTypeName == 'waveglider':
# for v in loader.include_names:
# loader.auxCoords[v] = {'time': 'TIME', 'latitude': 'latitude', 'longitude': 'longitude', 'depth': 'depth'}
pass
elif pName.startswith('Slocum'):
# Set depth to 0.0 for u and v as no depth coord is in the dataset's metadata
# - leave it up to the user not the data creator to decide this. :-(. Must also specify all other auxCoords.
loader.auxCoords['u'] = {'time': 'time_uv', 'latitude': 'lat_uv', 'longitude': 'lon_uv', 'depth': 0.0}
loader.auxCoords['v'] = {'time': 'time_uv', 'latitude': 'lat_uv', 'longitude': 'lon_uv', 'depth': 0.0}
loader.auxCoords['temperature'] = {'time': 'time', 'latitude': 'lat', 'longitude': 'lon', 'depth': 'depth'}
loader.auxCoords['salinity'] = {'time': 'time', 'latitude': 'lat', 'longitude': 'lon', 'depth': 'depth'}
loader.auxCoords['density'] = {'time': 'time', 'latitude': 'lat', 'longitude': 'lon', 'depth': 'depth'}
loader.auxCoords['fluorescence'] = {'time': 'time', 'latitude': 'lat', 'longitude': 'lon', 'depth': 'depth'}
loader.auxCoords['phycoerythrin'] = {'time': 'time', 'latitude': 'lat', 'longitude': 'lon', 'depth': 'depth'}
loader.auxCoords['cdom'] = {'time': 'time', 'latitude': 'lat', 'longitude': 'lon', 'depth': 'depth'}
loader.auxCoords['oxygen'] = {'time': 'time', 'latitude': 'lat', 'longitude': 'lon', 'depth': 'depth'}
loader.auxCoords['optical_backscatter470nm'] = {'time': 'time', 'latitude': 'lat', 'longitude': 'lon', 'depth': 'depth'}
loader.auxCoords['optical_backscatter532nm'] = {'time': 'time', 'latitude': 'lat', 'longitude': 'lon', 'depth': 'depth'}
loader.auxCoords['optical_backscatter660nm'] = {'time': 'time', 'latitude': 'lat', 'longitude': 'lon', 'depth': 'depth'}
loader.auxCoords['optical_backscatter700nm'] = {'time': 'time', 'latitude': 'lat', 'longitude': 'lon', 'depth': 'depth'}
elif pName.startswith('SPRAY'):
for p in loader.include_names:
loader.auxCoords[p] = {'time': 'TIME', 'latitude': 'LATITUDE', 'longitude': 'LONGITUDE', 'depth': 'DEPTH'}
elif pName.upper().startswith('NPS'):
for p in loader.include_names:
loader.auxCoords[p] = {'time': 'TIME', 'latitude': 'LATITUDE', 'longitude': 'LONGITUDE', 'depth': 'DEPTH'}
elif url.find('waveglider_gpctd_WG') != -1:
for p in loader.include_names:
loader.auxCoords[p] = {'time': 'TIME', 'latitude': 'latitude', 'longitude': 'longitude', 'depth': 'depth'}
elif url.find('waveglider_pco2_WG') != -1:
for p in loader.include_names:
loader.auxCoords[p] = {'time': 'TIME', 'latitude': 'LATITUDE', 'longitude': 'LONGITUDE', 'depth': 'DEPTH'}
# Fred is now writing according to CF-1.6 and we can expect compliance with auxiliary coordinate attribute specifications for future files
if plotTimeSeriesDepth is not None:
# WaveGliders essentially stay at the surface, so it's handy to have the Parameter tab for their data
loader.plotTimeSeriesDepth = dict.fromkeys(parmList + ['altitude'], plotTimeSeriesDepth)
try:
loader.process_data()
except VariableMissingCoordinatesAttribute as e:
loader.logger.exception(str(e))
else:
loader.logger.debug("Loaded Activity with name = %s", aName)
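# A minimal usage sketch for runGliderLoader (the URL and names are hypothetical
# placeholders); the platform name drives the auxCoords branching above, e.g. a
# platform name starting with 'SPRAY' selects the upper-case
# TIME/LATITUDE/LONGITUDE/DEPTH coordinate names:
#
# runGliderLoader('http://example.org/thredds/dodsC/gliders/spray_l0.nc',
#                 'Example Campaign', 'Example campaign description',
#                 'Example Activity', 'SPRAY_Example', 'ffeda0', 'glider',
#                 'Glider deployment', ['TEMP', 'PSAL'], 'default', stride=10)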
def runTimeSeriesLoader(url, cName, cDesc, aName, pName, pColor, pTypeName, aTypeName, parmList, dbAlias,
stride, startDatetime=None, endDatetime=None, command_line_args=None):
'''
Run the DAPloader for Generic CF Metadata timeSeries featureType data.
Following the load, important updates are made to the database.
'''
loader = TimeSeries_Loader(
url = url,
campaignName = cName,
campaignDescription = cDesc,
dbAlias = dbAlias,
activityName = aName,
activitytypeName = aTypeName,
platformName = pName,
platformColor = pColor,
platformTypeName = pTypeName,
stride = stride,
startDatetime = startDatetime,
endDatetime = endDatetime,
command_line_args = command_line_args)
if parmList:
loader.logger.debug("Setting include_names to %s", parmList)
loader.include_names = parmList
loader.process_data()
loader.logger.debug("Loaded Activity with name = %s", aName)
def runMooringLoader(url, cName, cDesc, aName, pName, pColor, pTypeName, aTypeName, parmList,
dbAlias, stride, startDatetime=None, endDatetime=None, dataStartDatetime=None,
command_line_args=None):
'''
Run the DAPloader for OceanSites formatted Mooring Station data and update the Activity with
attributes resulting from the load into dbAlias. Designed to be called from a script
that loads the data. Following the load, important updates are made to the database.
'''
loader = Mooring_Loader(
url = url,
campaignName = cName,
campaignDescription = cDesc,
dbAlias = dbAlias,
activityName = aName,
activitytypeName = aTypeName,
platformName = pName,
platformColor = pColor,
platformTypeName = pTypeName,
stride = stride,
startDatetime = startDatetime,
dataStartDatetime = dataStartDatetime,
endDatetime = endDatetime,
command_line_args = command_line_args,
)
if parmList:
loader.logger.debug("Setting include_names to %s", parmList)
loader.include_names = parmList
loader.auxCoords = {}
if url.endswith('_CMSTV.nc'):
# Special for combined file which has different coordinates for different variables
for v in loader.include_names:
if v in ['eastward_sea_water_velocity_HR', 'northward_sea_water_velocity_HR']:
loader.auxCoords[v] = {'time': 'hr_time_adcp', 'latitude': 'latitude', 'longitude': 'longitude', 'depth': 'HR_DEPTH_adcp'}
elif v in ['SEA_WATER_SALINITY_HR', 'SEA_WATER_TEMPERATURE_HR']:
loader.auxCoords[v] = {'time': 'hr_time_ts', 'latitude': 'LATITUDE', 'longitude': 'LONGITUDE', 'depth': 'DEPTH'}
elif v in ['SW_FLUX_HR', 'AIR_TEMPERATURE_HR', 'EASTWARD_WIND_HR', 'NORTHWARD_WIND_HR', 'WIND_SPEED_HR']:
loader.auxCoords[v] = {'time': 'hr_time_met', 'latitude': 'Latitude', 'longitude': 'Longitude', 'depth': 'HR_DEPTH_met'}
else:
loader.logger.warn('Do not have an auxCoords assignment for variable %s in url %s', v, url)
elif url.find('_hs2_') != -1:
# Special for fluorometer on M1 - the HS2
for v in loader.include_names:
if v in ['bb470', 'bb676', 'fl676']:
loader.auxCoords[v] = {'time': 'esecs', 'latitude': 'Latitude', 'longitude': 'Longitude', 'depth': 'NominalDepth'}
elif url.find('OA') != -1: # pragma: no cover
# Special for OA moorings: only 'time' is lower case
for v in loader.include_names:
loader.auxCoords[v] = {'time': 'time', 'latitude': 'LATITUDE', 'longitude': 'LONGITUDE', 'depth': 'DEPTH'}
elif url.find('ccebin') != -1: # pragma: no cover
# Special for CCEBIN mooring
if 'adcp' in url:
for v in loader.include_names:
loader.auxCoords[v] = {'time': 'time', 'latitude': 'latitude', 'longitude': 'longitude', 'depth': 'depth'}
else:
for v in loader.include_names:
loader.auxCoords[v] = {'time': 'esecs', 'latitude': 'Latitude', 'longitude': 'Longitude', 'depth': 'NominalDepth'}
elif url.find('CCE_BIN') != -1: # pragma: no cover
# CCE_BIN file variables have coordinate attributes, no need to override
loader.auxCoords = []
else:
# Auxiliary coordinates are the same for all include_names for _TS and _M files
for v in loader.include_names:
loader.auxCoords[v] = {'time': 'TIME', 'latitude': 'LATITUDE', 'longitude': 'LONGITUDE', 'depth': 'DEPTH'}
try:
loader.process_data()
loader.logger.debug("Loaded Activity with name = %s", aName)
except NoValidData as e:
loader.logger.warning(str(e))
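# A minimal usage sketch for runMooringLoader (the URL and names are hypothetical
# placeholders); the auxCoords logic above selects coordinate names from the file
# name, so OceanSITES-style _TS files with upper-case TIME/LATITUDE/LONGITUDE/DEPTH
# coordinates need no extra arguments:
#
# runMooringLoader('http://example.org/thredds/dodsC/moorings/M1_TS.nc',
#                  'Example Campaign', 'Example campaign description',
#                  'Example Activity', 'M1_Mooring', 'ffeda0', 'mooring',
#                  'Mooring deployment', ['TEMP', 'PSAL'], 'default', stride=10)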
if __name__ == '__main__':
# A nice test data load for a northern Monterey Bay survey
# See loaders/CANON/__init__.py for more examples of how these loaders are used
baseUrl = 'http://odss.mbari.org/thredds/dodsC/dorado/'
auv_file = 'Dorado389_2010_300_00_300_00_decim.nc'
parms = [ 'temperature', 'oxygen', 'nitrate', 'bbp420', 'bbp700',
'fl700_uncorr', 'salinity', 'biolume', 'roll', 'pitch', 'yaw',
'sepCountList', 'mepCountList'
]
stride = 1000 # Make large for quicker runs, smaller for denser data
dbAlias = 'default'
runDoradoLoader(baseUrl + auv_file, 'Campaign Name', 'Campaign Description',
'Activity Name', 'Platform Name - Dorado', 'ffeda0', 'auv',
'AUV Mission', parms, dbAlias, stride)
|
stoqs/stoqs
|
stoqs/loaders/DAPloaders.py
|
Python
|
gpl-3.0
| 144,075
|
[
"NetCDF"
] |
0143ccf3804a3af19bbbf35907354ff7469fcf685916bf76bc406c49cb02051c
|
# coding: utf-8
from django.shortcuts import render
def hello(request):
# request.GET, request.POST and request.COOKIES are dictionary-like
# objects, meaning you can access them like you can access a dictionary.
# So you can make request.GET['name'] or request.POST['name'] or
# request.COOKIES['name'] to get values from them. But as with a dict, this
# will raise KeyError if the key does not exist. To avoid that, you
# can, like with dictionaries, use the get() method. It returns the value
# or None if the key doesn't exist. If you provide a second argument
# it will return it if the key doesn't exist, and return the right value
# otherwise.
# Returns the value associated with 'name' passed via COOKIES, or an empty string.
name = request.COOKIES.get('name', '')
# Returns the value associated with 'name' passed via POST, or the
# value from COOKIES.
# This way POST always overwrites the value from COOKIES.
name = request.POST.get('name', name)
# Returns the value associated with 'name' passed via GET, or from POST.
# This way GET always overwrites the value from POST and COOKIES
name = request.GET.get('name', name)
# In case the user put a space at the beginning or end of their name,
# strip it, after ensuring it is a string
name = str(name).strip()
# Let's add default value to name
if not name:
name = "anonymous"
# Create a dictionary to pass these values in the context. We add
# request.method which will contain the string "POST", "GET", "PUT", etc
# according to the HTTP method that is used to access this view.
context = {'name': name, 'method': request.method}
# Instead of just returning the response, we store it in a variable.
# In Django, responses are objects, and we can manipulate them before
# sending them to the browser.
response = render(request, 'app5_hello.html', context)
# We set a cookie with the value "name" in the response, so next time
# the browser visit this view, it will send this value via a cookie.
response.set_cookie('name', name)
# Returns the modified response.
return response
|
sametmax/Django--an-app-at-a-time
|
apps/app5_get_post_and_cookies/views.py
|
Python
|
mit
| 2,209
|
[
"VisIt"
] |
25d1193b2ef5dedf1d60b93d4da2c506c8f1e03cc6177ce34e2b1a5ceefa29b0
|
import jinja2 as j
import os, errno
import visitors.visitor as v
import state_machinery.state_mach as sm
class Template(v.Visitor):
def __init__(self,filename):
self.filename = filename
loader = j.PackageLoader('statemach', 'visitors/templates')
self.environment = j.Environment(loader=loader)
def _mkdir_p(self,path):
try:
os.makedirs(path)
except OSError as x:
if x.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def use_filter(self, name, function):
self.environment.filters[name] = function
def visit(self, state_machine):
self.template = self.environment.get_template(self.filename)
output = self.template.render(
state_machine = state_machine,
states = state_machine.states.values(),
transitions = state_machine.transitions.values(),
attribute_groups = state_machine.attribute_group_list(),
actors = state_machine.actors)
self.path='output/'+state_machine.service+'/'+state_machine.resource
self._mkdir_p(self.path)
with open(self.path+'/'+self.filename, 'w+') as outfile:
print >>outfile, output
class TemplateAndCommand(Template):
def __init__(self,filename,command_template):
Template.__init__(self,filename)
self.command_template = command_template
def visit(self, state_machine):
Template.visit(self,state_machine)
system_command = self.command_template.format(path=self.path)
os.system(system_command)
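# A hypothetical usage sketch (the template file name, filter and state machine
# object below are illustrative only; the real driver lives elsewhere in this
# repository):
#
# template = Template('states.java')
# template.use_filter('camel_case', lambda s: ''.join(w.title() for w in s.split('_')))
# template.visit(state_machine)  # renders to output/<service>/<resource>/states.java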
|
bwtaylor/statemach
|
visitors/template.py
|
Python
|
apache-2.0
| 1,512
|
[
"VisIt"
] |
e55db31158025b9be0203b6cb4e4bab16f91bce3371a271c7f3c54d69d1ab7bf
|
import os
import MooseDocs
import utils
import logging
log = logging.getLogger(__name__)
def generate_options(parser, subparser):
"""
Command-line options for generate command.
"""
generate_parser = subparser.add_parser('generate', help="Check that documentation exists for your application and generate the markdown documentation from the MOOSE application executable.")
generate_parser.add_argument('--disable-stubs', dest='stubs', action='store_false', help="Disable the creation of system and object stub markdown files.")
generate_parser.add_argument('--disable-pages-stubs', dest='pages_stubs', action='store_false', help="Disable the creation of the pages.yml files.")
return generate_parser
def generate(config_file='moosedocs.yml', pages='pages.yml', stubs=False, pages_stubs=False, **kwargs):
"""
Generates MOOSE system and object markdown files from the source code.
Args:
config_file[str]: (Default: 'moosedocs.yml') The MooseMkDocs project configuration file.
"""
# Configuration file
if not os.path.exists(config_file):
raise IOError("The supplied configuration file was not found: {}".format(config_file))
# Read the configuration
config = MooseDocs.yaml_load(config_file)
config = config['markdown_extensions'][-1]['MooseDocs.extensions.MooseMarkdown']
# Run the executable
exe = config['executable']
if not os.path.exists(exe):
log.error('The executable does not exist: {}'.format(exe))
else:
log.debug("Executing {} to extract syntax.".format(exe))
raw = utils.runExe(exe, '--yaml')
yaml = utils.MooseYaml(raw)
# Populate the syntax
for key, value in config['locations'].iteritems():
if 'hide' in value:
value['hide'] += config.get('hide', [])
else:
value['hide'] = config.get('hide', [])
syntax = MooseDocs.MooseApplicationSyntax(yaml, name=key, stubs=stubs, pages_stubs=pages_stubs, pages=pages, **value)
log.info("Checking documentation for '{}'.".format(key))
syntax.check()
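# A hedged usage sketch: how generate() might be invoked directly from Python,
# assuming a moosedocs.yml with an 'executable' entry exists in the working
# directory (the normal entry point is the 'generate' sub-command wired up via
# generate_options above):
#
# generate(config_file='moosedocs.yml', stubs=True, pages_stubs=True)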
|
vityurkiv/Ox
|
python/MooseDocs/commands/generate.py
|
Python
|
lgpl-2.1
| 2,000
|
[
"MOOSE"
] |
e115a29a95bacab98f42d961957d6fc9ca096b65d2df29bcd2a55e52719cccc3
|
from __future__ import (absolute_import, division, print_function)
import numpy as np
import time
from mantid import mtd
from mantid.kernel import StringListValidator, Direction, FloatBoundedValidator
from mantid.api import PythonAlgorithm, MultipleFileProperty, FileProperty, FileAction, WorkspaceGroupProperty, Progress
from mantid.simpleapi import * # noqa
class IndirectILLReductionFWS(PythonAlgorithm):
_SAMPLE = 'sample'
_BACKGROUND = 'background'
_CALIBRATION = 'calibration'
_BACKCALIB = 'calibrationBackground'
_sample_files = None
_background_files = None
_calibration_files = None
_background_calib_files = None
_observable = None
_sortX = None
_red_ws = None
_back_scaling = None
_back_calib_scaling = None
_criteria = None
_progress = None
_back_option = None
_calib_option = None
_back_calib_option = None
_common_args = {}
_all_runs = None
def category(self):
return "Workflow\\MIDAS;Workflow\\Inelastic;Inelastic\\Indirect;Inelastic\\Reduction;ILL\\Indirect"
def summary(self):
return 'Performs fixed-window scan (FWS) multiple file reduction (both elastic and inelastic) ' \
'for ILL indirect geometry data, instrument IN16B.'
def name(self):
return "IndirectILLReductionFWS"
def PyInit(self):
self.declareProperty(MultipleFileProperty('Run', extensions=['nxs']),
doc='Run number(s) of sample run(s).')
self.declareProperty(MultipleFileProperty('BackgroundRun',
action=FileAction.OptionalLoad,
extensions=['nxs']),
doc='Run number(s) of background (empty can) run(s).')
self.declareProperty(MultipleFileProperty('CalibrationRun',
action=FileAction.OptionalLoad,
extensions=['nxs']),
doc='Run number(s) of vanadium calibration run(s).')
self.declareProperty(MultipleFileProperty('CalibrationBackgroundRun',
action=FileAction.OptionalLoad,
extensions=['nxs']),
doc='Run number(s) of background (empty can) run(s) for vanadium run.')
self.declareProperty(name='Observable',
defaultValue='sample.temperature',
doc='Scanning observable, a Sample Log entry\n')
self.declareProperty(name='SortXAxis',
defaultValue=False,
doc='Whether or not to sort the x-axis\n')
self.declareProperty(name='BackgroundScalingFactor', defaultValue=1.,
validator=FloatBoundedValidator(lower=0),
doc='Scaling factor for background subtraction')
self.declareProperty(name='CalibrationBackgroundScalingFactor', defaultValue=1.,
validator=FloatBoundedValidator(lower=0),
doc='Scaling factor for background subtraction for vanadium calibration')
self.declareProperty(name='BackgroundOption',
defaultValue='Sum',
validator=StringListValidator(['Sum','Interpolate']),
doc='Whether to sum or interpolate the background runs.')
self.declareProperty(name='CalibrationOption',
defaultValue='Sum',
validator=StringListValidator(['Sum', 'Interpolate']),
doc='Whether to sum or interpolate the calibration runs.')
self.declareProperty(name='CalibrationBackgroundOption',
defaultValue='Sum',
validator=StringListValidator(['Sum', 'Interpolate']),
doc='Whether to sum or interpolate the background run for calibration runs.')
self.declareProperty(FileProperty('MapFile', '',
action=FileAction.OptionalLoad,
extensions=['map','xml']),
doc='Filename of the detector grouping map file to use. \n'
'By default all the pixels will be summed per each tube. \n'
'Use .map or .xml file (see GroupDetectors documentation) '
'only if different range is needed for each tube.')
self.declareProperty(name='ManualPSDIntegrationRange',defaultValue=[1,128],
doc='Integration range of vertical pixels in each PSD tube. \n'
'By default all the pixels will be summed per each tube. \n'
'Use this option if the same range (other than default) '
'is needed for all the tubes.')
self.declareProperty(name='Analyser',
defaultValue='silicon',
validator=StringListValidator(['silicon']),
doc='Analyser crystal.')
self.declareProperty(name='Reflection',
defaultValue='111',
validator=StringListValidator(['111', '311']),
doc='Analyser reflection.')
self.declareProperty(WorkspaceGroupProperty('OutputWorkspace', '',
direction=Direction.Output),
doc='Output workspace group')
self.declareProperty(name='SpectrumAxis', defaultValue='SpectrumNumber',
validator=StringListValidator(['SpectrumNumber', '2Theta', 'Q', 'Q2']),
doc='The spectrum axis conversion target.')
def validateInputs(self):
issues = dict()
if self.getPropertyValue('CalibrationBackgroundRun') and not self.getPropertyValue('CalibrationRun'):
issues['CalibrationRun'] = 'Calibration runs are required, ' \
'if background for calibration is given.'
return issues
def setUp(self):
self._sample_files = self.getPropertyValue('Run')
self._background_files = self.getPropertyValue('BackgroundRun')
self._calibration_files = self.getPropertyValue('CalibrationRun')
self._background_calib_files = self.getPropertyValue('CalibrationBackgroundRun')
self._observable = self.getPropertyValue('Observable')
self._sortX = self.getProperty('SortXAxis').value
self._back_scaling = self.getProperty('BackgroundScalingFactor').value
self._back_calib_scaling = self.getProperty('CalibrationBackgroundScalingFactor').value
self._back_option = self.getPropertyValue('BackgroundOption')
self._calib_option = self.getPropertyValue('CalibrationOption')
self._back_calib_option = self.getPropertyValue('CalibrationBackgroundOption')
self._spectrum_axis = self.getPropertyValue('SpectrumAxis')
# arguments to pass to IndirectILLEnergyTransfer
self._common_args['MapFile'] = self.getPropertyValue('MapFile')
self._common_args['Analyser'] = self.getPropertyValue('Analyser')
self._common_args['Reflection'] = self.getPropertyValue('Reflection')
self._common_args['ManualPSDIntegrationRange'] = self.getProperty('ManualPSDIntegrationRange').value
self._common_args['SpectrumAxis'] = self._spectrum_axis
self._red_ws = self.getPropertyValue('OutputWorkspace')
suffix = ''
if self._spectrum_axis == 'SpectrumNumber':
suffix = '_red'
elif self._spectrum_axis == '2Theta':
suffix = '_2theta'
elif self._spectrum_axis == 'Q':
suffix = '_q'
elif self._spectrum_axis == 'Q2':
suffix = '_q2'
self._red_ws += suffix
# Nexus metadata criteria for FWS type of data (both EFWS and IFWS)
self._criteria = '($/entry0/instrument/Doppler/maximum_delta_energy$ == 0. or ' \
'$/entry0/instrument/Doppler/velocity_profile$ == 1)'
# make sure observable entry also exists (value is not important)
self._criteria += ' and ($/entry0/' + self._observable.replace('.', '/') + '$ or True)'
# force sort x-axis, if interpolation is requested
if ((self._back_option == 'Interpolate' and self._background_files) or
(self._calib_option == 'Interpolate' and self._calibration_files) or
(self._back_calib_option == 'Interpolate' and self._background_calib_files)) \
and not self._sortX:
self.log().warning('Interpolation option requested, X-axis will be sorted.')
self._sortX = True
# empty dictionary to keep track of all runs (ws names)
self._all_runs = dict()
def _filter_files(self, files, label):
'''
Filters the given list of files according to nexus criteria
@param files :: list of input files (i.e. , and + separated string)
@param label :: label of error message if nothing left after filtering
@throws RuntimeError :: when nothing left after filtering
@return :: the list of input files that passed the criteria
'''
files = SelectNexusFilesByMetadata(files, self._criteria)
if not files:
raise RuntimeError('None of the {0} runs satisfied the FWS and Observable criteria.'.format(label))
else:
self.log().information('Filtered {0} runs are: {1} \\n'.format(label, files.replace(',', '\\n')))
return files
def _ifws_peak_bins(self, ws):
'''
Gives the bin indices of the first and last peaks (of spectra 0) in the IFWS
@param ws :: input workspace
return :: [xmin,xmax]
'''
y = mtd[ws].readY(0)
size = len(y)
mid = int(size / 2)
imin = np.nanargmax(y[0:mid])
imax = np.nanargmax(y[mid:size]) + mid
return imin, imax
def _ifws_integrate(self, wsgroup):
'''
Integrates IFWS over two peaks at the beginning and end
@param ws :: input workspace group
'''
for item in mtd[wsgroup]:
ws = item.getName()
size = item.blocksize()
imin, imax = self._ifws_peak_bins(ws)
x_values = item.readX(0)
int1 = '__int1_' + ws
int2 = '__int2_' + ws
Integration(InputWorkspace=ws, OutputWorkspace=int1,
RangeLower=x_values[0], RangeUpper=x_values[2*imin])
Integration(InputWorkspace=ws, OutputWorkspace=int2,
RangeLower=x_values[-2*(size-imax)], RangeUpper=x_values[-1])
Plus(LHSWorkspace=int1, RHSWorkspace=int2, OutputWorkspace=ws)
DeleteWorkspace(int1)
DeleteWorkspace(int2)
def _perform_unmirror(self, groupws):
'''
Sums the integrals of left and right for two wings, or returns the integral of one wing
@param ws :: group workspace containing one ws for one wing, and two ws for two wing data
'''
if mtd[groupws].getNumberOfEntries() == 2: # two wings, sum
left = mtd[groupws].getItem(0).getName()
right = mtd[groupws].getItem(1).getName()
sum = '__sum_'+groupws
left_monitor = mtd[left].getRun().getLogData('MonitorIntegral').value
right_monitor = mtd[right].getRun().getLogData('MonitorIntegral').value
if left_monitor != 0. and right_monitor != 0.:
sum_monitor = left_monitor + right_monitor
left_factor = left_monitor / sum_monitor
right_factor = right_monitor / sum_monitor
Scale(InputWorkspace=left, OutputWorkspace=left, Factor=left_factor)
Scale(InputWorkspace=right, OutputWorkspace=right, Factor=right_factor)
else:
self.log().notice('Zero monitor integral has been found in one (or both) wings;'
' left: {0}, right: {1}'.format(left_monitor, right_monitor))
Plus(LHSWorkspace=left, RHSWorkspace=right, OutputWorkspace=sum)
DeleteWorkspace(left)
DeleteWorkspace(right)
RenameWorkspace(InputWorkspace=sum, OutputWorkspace=groupws)
else:
RenameWorkspace(InputWorkspace=mtd[groupws].getItem(0), OutputWorkspace=groupws)
def PyExec(self):
self.setUp()
# total number of (unsummed) runs
total = self._sample_files.count(',')+self._background_files.count(',')+self._calibration_files.count(',')
self._progress = Progress(self, start=0.0, end=1.0, nreports=total)
self._reduce_multiple_runs(self._sample_files, self._SAMPLE)
if self._background_files:
self._reduce_multiple_runs(self._background_files, self._BACKGROUND)
back_ws = self._red_ws + '_' + self._BACKGROUND
Scale(InputWorkspace=back_ws, Factor=self._back_scaling, OutputWorkspace=back_ws)
if self._back_option == 'Sum':
self._integrate(self._BACKGROUND, self._SAMPLE)
else:
self._interpolate(self._BACKGROUND, self._SAMPLE)
self._subtract_background(self._BACKGROUND, self._SAMPLE)
DeleteWorkspace(back_ws)
if self._calibration_files:
self._reduce_multiple_runs(self._calibration_files, self._CALIBRATION)
if self._background_calib_files:
self._reduce_multiple_runs(self._background_calib_files, self._BACKCALIB)
back_calib_ws = self._red_ws + '_' + self._BACKCALIB
Scale(InputWorkspace=back_calib_ws, Factor=self._back_calib_scaling, OutputWorkspace=back_calib_ws)
if self._back_calib_option == 'Sum':
self._integrate(self._BACKCALIB, self._CALIBRATION)
else:
self._interpolate(self._BACKCALIB, self._CALIBRATION)
self._subtract_background(self._BACKCALIB, self._CALIBRATION)
DeleteWorkspace(back_calib_ws)
if self._calib_option == 'Sum':
self._integrate(self._CALIBRATION, self._SAMPLE)
else:
self._interpolate(self._CALIBRATION, self._SAMPLE)
self._calibrate()
DeleteWorkspace(self._red_ws + '_' + self._CALIBRATION)
self.log().debug('Run files map is :'+str(self._all_runs))
self.setProperty('OutputWorkspace',self._red_ws)
def _reduce_multiple_runs(self, files, label):
'''
Filters and reduces multiple files
@param files :: list of run paths
@param label :: output ws name
'''
files = self._filter_files(files, label)
for run in files.split(','):
self._reduce_run(run, label)
self._create_matrices(label)
def _reduce_run(self, run, label):
'''
Reduces the given (single or summed multiple) run
@param run :: run path
@param label :: sample, background or calibration
'''
runs_list = run.split('+')
runnumber = os.path.basename(runs_list[0]).split('.')[0]
ws = '__' + runnumber
if (len(runs_list) > 1):
ws += '_multiple'
ws += '_' + label
self._progress.report("Reducing run #" + runnumber)
IndirectILLEnergyTransfer(Run=run, OutputWorkspace=ws, **self._common_args)
energy = round(mtd[ws].getItem(0).getRun().getLogData('Doppler.maximum_delta_energy').value, 2)
if energy == 0.:
# Elastic, integrate over full energy range
Integration(InputWorkspace=ws, OutputWorkspace=ws)
else:
# Inelastic, do something more complex
self._ifws_integrate(ws)
ConvertToPointData(InputWorkspace=ws, OutputWorkspace=ws)
self._perform_unmirror(ws)
self._subscribe_run(ws, energy, label)
def _subscribe_run(self, ws, energy, label):
'''
Subscribes the given ws name to the map for given energy and label
@param ws :: workspace name
@param energy :: energy value
@param label :: sample, calibration or background
'''
if label in self._all_runs:
if energy in self._all_runs[label]:
self._all_runs[label][energy].append(ws)
else:
self._all_runs[label][energy] = [ws]
else:
self._all_runs[label] = dict()
self._all_runs[label][energy] = [ws]
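# Example of the bookkeeping structure this builds (run numbers and energies are
# illustrative only): after reducing two elastic runs and one inelastic sample run,
# self._all_runs might look like
# {'sample': {0.0: ['__123456_sample', '__123457_sample'], 2.0: ['__123458_sample']}}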
def _integrate(self, label, reference):
'''
Averages the background or calibration intensities over all observable points at given energy
@param label :: calibration or background
@param reference :: sample or calibration
'''
for energy in self._all_runs[reference]:
if energy in self._all_runs[label]:
ws = self._insert_energy_value(self._red_ws + '_' + label, energy, label)
if mtd[ws].blocksize() > 1:
SortXAxis(InputWorkspace=ws, OutputWorkspace=ws)
axis = mtd[ws].readX(0)
start = axis[0]
end = axis[-1]
range = end-start
params = [start, range, end]
Rebin(InputWorkspace=ws, OutputWorkspace=ws, Params=params)
def _interpolate(self, label, reference):
'''
Interpolates the background or calibration intensities to
all observable points existing in sample at a given energy
@param label :: calibration or background
@param reference :: to interpolate to, can be sample or calibration
'''
for energy in self._all_runs[reference]:
if energy in self._all_runs[label]:
ws = self._insert_energy_value(self._red_ws + '_' + label, energy, label)
if reference == self._SAMPLE:
ref = self._insert_energy_value(self._red_ws, energy, reference)
else:
ref = self._insert_energy_value(self._red_ws + '_' + reference, energy, reference)
if mtd[ws].blocksize() > 1:
SplineInterpolation(WorkspaceToInterpolate=ws,
WorkspaceToMatch=ref,
OutputWorkspace=ws)
# TODO: add Linear2Point=True when ready
def _subtract_background(self, background, reference):
'''
Subtracts the background per each energy if background run is available
@param background :: background to subtract
@param reference :: to subtract from
'''
for energy in self._all_runs[reference]:
if energy in self._all_runs[background]:
if reference == self._SAMPLE:
lhs = self._insert_energy_value(self._red_ws, energy, reference)
else:
lhs = self._insert_energy_value(self._red_ws + '_' + reference, energy, reference)
rhs = self._insert_energy_value(self._red_ws + '_' + background, energy, background)
Minus(LHSWorkspace=lhs, RHSWorkspace=rhs, OutputWorkspace=lhs)
else:
self.log().warning('No background subtraction can be performed for doppler energy of {0} microEV, '
'since no background run was provided for the same energy value.'.format(energy))
def _calibrate(self):
'''
Performs calibration per each energy if calibration run is available
'''
for energy in self._all_runs[self._SAMPLE]:
if energy in self._all_runs[self._CALIBRATION]:
sample_ws = self._insert_energy_value(self._red_ws, energy, self._SAMPLE)
calib_ws = sample_ws + '_' + self._CALIBRATION
Divide(LHSWorkspace=sample_ws, RHSWorkspace=calib_ws, OutputWorkspace=sample_ws)
self._scale_calibration(sample_ws,calib_ws)
else:
self.log().warning('No calibration can be performed for doppler energy of {0} microEV, '
'since no calibration run was provided for the same energy value.'.format(energy))
def _scale_calibration(self, sample, calib):
'''
Scales sample workspace after calibration up by the maximum of integral intensity
in calibration run for each observable point
@param sample :: sample workspace after calibration
@param calib :: calibration workspace
'''
if mtd[calib].blocksize() == 1:
scale = np.max(mtd[calib].extractY()[:,0])
Scale(InputWorkspace=sample,Factor=scale,OutputWorkspace=sample,Operation='Multiply')
else:
# here calib and sample have the same size already
for column in range(mtd[sample].blocksize()):
scale = np.max(mtd[calib].extractY()[:,column])
for spectrum in range(mtd[sample].getNumberHistograms()):
mtd[sample].dataY(spectrum)[column] *= scale
mtd[sample].dataE(spectrum)[column] *= scale
def _get_observable_values(self, ws_list):
'''
Retrieves the needed sample log values for the given list of workspaces
@param ws_list :: list of workspaces
@returns :: array of observable values
@throws :: ValueError if the log entry is neither a number nor a time-stamp
'''
result = []
zero_time = 0
pattern = '%Y-%m-%dT%H:%M:%S'
for i,ws in enumerate(ws_list):
log = mtd[ws].getRun().getLogData(self._observable)
value = log.value
if log.type == 'number':
value = float(value)
else:
try:
value = time.mktime(time.strptime(value, pattern))
except ValueError:
raise ValueError("Invalid observable. "
"Provide a numeric (sample.*, run_number, etc.) or time-stamp "
"like string (e.g. start_time) log.")
if i == 0:
zero_time = value
value = value - zero_time
result.append(value)
return result
def _create_matrices(self, label):
'''
For each reduction type concatenates the workspaces putting the given sample log value as x-axis
Creates a group workspace for the given label, that contains 2D workspaces for each distinct energy value
@param label :: sample, background or calibration
'''
togroup = []
groupname = self._red_ws
if label != self._SAMPLE:
groupname += '_' + label
for energy in sorted(self._all_runs[label]):
ws_list = self._all_runs[label][energy]
wsname = self._insert_energy_value(groupname, energy, label)
togroup.append(wsname)
nspectra = mtd[ws_list[0]].getNumberHistograms()
observable_array = self._get_observable_values(self._all_runs[label][energy])
ConjoinXRuns(InputWorkspaces=ws_list, OutputWorkspace=wsname)
mtd[wsname].setDistribution(True)
run_list = '' # to set to sample logs
for ws in ws_list:
run = mtd[ws].getRun()
if run.hasProperty('run_number_list'):
run_list += run.getLogData('run_number_list').value.replace(', ', '+') + ','
else:
run_list += str(run.getLogData('run_number').value) + ','
AddSampleLog(Workspace=wsname, LogName='ReducedRunsList', LogText=run_list.rstrip(','))
for spectrum in range(nspectra):
mtd[wsname].setX(spectrum, np.array(observable_array))
if self._sortX:
SortXAxis(InputWorkspace=wsname, OutputWorkspace=wsname)
self._set_x_label(wsname)
for energy, ws_list in self._all_runs[label].items():
for ws in ws_list:
DeleteWorkspace(ws)
GroupWorkspaces(InputWorkspaces=togroup, OutputWorkspace=groupname)
def _set_x_label(self, ws):
'''
Sets the x-axis label
@param ws :: input workspace
'''
axis = mtd[ws].getAxis(0)
if self._observable == 'sample.temperature':
axis.setUnit("Label").setLabel('Temperature', 'K')
elif self._observable == 'sample.pressure':
axis.setUnit("Label").setLabel('Pressure', 'P')
elif 'time' in self._observable:
axis.setUnit("Label").setLabel('Time', 'seconds')
else:
axis.setUnit("Label").setLabel(self._observable, '')
def _insert_energy_value(self, ws_name, energy, label):
'''
Inserts the doppler's energy value in the workspace name
in between the user input and automatic suffix
@param ws_name : workspace name
@param energy : energy value
@param label : sample, background, or calibration
@return : new name with energy value inside
Example:
user_input_2theta > user_input_1.5_2theta
user_input_red_background > user_input_1.5_red_background
'''
suffix_pos = ws_name.rfind('_')
if label != self._SAMPLE:
# find second to last underscore
suffix_pos = ws_name.rfind('_', 0, suffix_pos)
return ws_name[:suffix_pos] + '_' + str(energy) + ws_name[suffix_pos:]
# Register algorithm with Mantid
AlgorithmFactory.subscribe(IndirectILLReductionFWS)
|
wdzhou/mantid
|
Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLReductionFWS.py
|
Python
|
gpl-3.0
| 26,218
|
[
"CRYSTAL"
] |
e383b390b954402e937269f6ab4900f1dc78ddbd7c4acc16cd6280ee6b19cc89
|
# Basic imports
import os
import sys
import inspect
import debug
from lxml import etree
import collections
def parseAndValidateWithSchema(modelName, modelPath) :
prefixPath = ''
if modelName == 'xml' :
schemaPath = os.path.join(prefixPath, 'schema/moose/moose.xsd')
if not os.path.isfile(schemaPath) :
debug.dump("WARN", "Schema {0} does not exists..".format(schemaPath))
try :
schemaH = open(schemaPath, "r")
schemaText = schemaH.read()
schemaH.close()
except Exception as e :
debug.dump("WARN", "Error reading schema for validation."+
" Falling back to validation-disabled parser."
+ " Failed with error {0}".format(e))
return parseWithoutValidation(modelName, modelPath)
# Now we have the schema text
schema = etree.XMLSchema(etree.XML(schemaText))
xmlParser = etree.XMLParser(schema=schema, remove_comments=True)
with open(modelPath, "r") as xmlTextFile :
return etree.parse(xmlTextFile, xmlParser)
def parseWithoutValidation(modelName, modelPath) :
xmlParser = etree.XMLParser(remove_comments=True)
try :
xmlRootElem = etree.parse(modelPath, xmlParser)
except Exception as e :
debug.dump("ERROR", "Parsing of {0} failed.".format(modelPath))
debug.dump("DEBUG", "Error: {0}".format(e))
raise RuntimeError, "Failed to parse XML"
return xmlRootElem
def parseXMLs(commandLineArgs, validate=False) :
xmlRootElemDict = collections.defaultdict(list)
models = vars(commandLineArgs)
for model in models :
if models[model] :
for modelPath in models[model] :
debug.dump("INFO", "Parsing {0}".format(models[model]))
if validate :
# parse the model and validate it with the schema
modelXMLRootElem = parseAndValidateWithSchema(model, modelPath)
else :
# Simply parse the model without validating it against the schema.
modelXMLRootElem = parseWithoutValidation(model, modelPath)
if modelXMLRootElem :
xmlRootElemDict[model].append((modelXMLRootElem, modelPath))
assert len(xmlRootElemDict) > 0
return xmlRootElemDict
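# A hedged usage sketch (the argument names are hypothetical): parseXMLs expects an
# argparse-style Namespace whose attributes are lists of model file paths, e.g. the
# 'xml' model that parseAndValidateWithSchema handles above:
#
# import argparse
# parser = argparse.ArgumentParser()
# parser.add_argument('--xml', nargs='+', help='MOOSE xml model file(s)')
# args = parser.parse_args(['--xml', 'model.xml'])
# rootElems = parseXMLs(args, validate=True)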
|
rahulgayatri23/moose-core
|
python/libmumbl/utility/xml_parser.py
|
Python
|
gpl-3.0
| 2,282
|
[
"MOOSE"
] |
d2a22f3902e3f231ea54a6443f437efb3a35373dd19b88ea879171724fa4097a
|
# -*- coding: utf-8 -*-
"""
End-to-end tests related to the cohort management on the LMS Instructor Dashboard
"""
import os
import uuid
from datetime import datetime
import unicodecsv
from bok_choy.promise import EmptyPromise
from pytz import UTC, utc
from common.test.acceptance.fixtures.course import CourseFixture
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.instructor_dashboard import DataDownloadPage, InstructorDashboardPage
from common.test.acceptance.pages.studio.settings_group_configurations import GroupConfigurationsPage
from common.test.acceptance.tests.discussion.helpers import CohortTestMixin
from common.test.acceptance.tests.helpers import EventsTestMixin, UniqueCourseTest, create_user_partition_json
from openedx.core.lib.tests import attr
from xmodule.partitions.partitions import Group
@attr(shard=8)
class CohortConfigurationTest(EventsTestMixin, UniqueCourseTest, CohortTestMixin):
"""
Tests for cohort management on the LMS Instructor Dashboard
"""
def setUp(self):
"""
Set up a cohorted course
"""
super(CohortConfigurationTest, self).setUp()
# create course with cohorts
self.manual_cohort_name = "ManualCohort1"
self.auto_cohort_name = "AutoCohort1"
self.course_fixture = CourseFixture(**self.course_info).install()
self.setup_cohort_config(self.course_fixture, auto_cohort_groups=[self.auto_cohort_name])
self.manual_cohort_id = self.add_manual_cohort(self.course_fixture, self.manual_cohort_name)
# create a non-instructor who will be registered for the course and in the manual cohort.
self.student_name, self.student_email = self._generate_unique_user_data()
self.student_id = AutoAuthPage(
self.browser, username=self.student_name, email=self.student_email,
course_id=self.course_id, staff=False
).visit().get_user_id()
self.add_user_to_cohort(self.course_fixture, self.student_name, self.manual_cohort_id)
# create a second student user
self.other_student_name, self.other_student_email = self._generate_unique_user_data()
self.other_student_id = AutoAuthPage(
self.browser, username=self.other_student_name, email=self.other_student_email,
course_id=self.course_id, staff=False
).visit().get_user_id()
# login as an instructor
self.instructor_name, self.instructor_email = self._generate_unique_user_data()
self.instructor_id = AutoAuthPage(
self.browser, username=self.instructor_name, email=self.instructor_email,
course_id=self.course_id, staff=True
).visit().get_user_id()
# go to the membership page on the instructor dashboard
self.instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
self.instructor_dashboard_page.visit()
self.cohort_management_page = self.instructor_dashboard_page.select_cohort_management()
def verify_cohort_description(self, cohort_name, expected_description):
"""
Selects the cohort with the given name and verifies the expected description is presented.
"""
self.cohort_management_page.select_cohort(cohort_name)
self.assertEquals(self.cohort_management_page.get_selected_cohort(), cohort_name)
self.assertIn(expected_description, self.cohort_management_page.get_cohort_group_setup())
def test_cohort_description(self):
"""
Scenario: the cohort configuration management in the instructor dashboard specifies whether
students are automatically or manually assigned to specific cohorts.
Given I have a course with a manual cohort and an automatic cohort defined
When I view the manual cohort in the instructor dashboard
There is text specifying that students are only added to the cohort manually
And when I view the automatic cohort in the instructor dashboard
There is text specifying that students are automatically added to the cohort
"""
self.verify_cohort_description(
self.manual_cohort_name,
'Learners are added to this cohort only when you provide '
'their email addresses or usernames on this page',
)
self.verify_cohort_description(
self.auto_cohort_name,
'Learners are added to this cohort automatically',
)
def test_no_content_groups(self):
"""
Scenario: if the course has no content groups defined (user_partitions of type cohort),
the settings in the cohort management tab reflect this
Given I have a course with a cohort defined but no content groups
When I view the cohort in the instructor dashboard and select settings
Then the cohort is not linked to a content group
And there is text stating that no content groups are defined
And I cannot select the radio button to enable content group association
And there is a link I can select to open Group settings in Studio
"""
self.cohort_management_page.select_cohort(self.manual_cohort_name)
self.assertIsNone(self.cohort_management_page.get_cohort_associated_content_group())
self.assertEqual(
"Warning:\nNo content groups exist. Create a content group",
self.cohort_management_page.get_cohort_related_content_group_message()
)
self.assertFalse(self.cohort_management_page.select_content_group_radio_button())
self.cohort_management_page.select_studio_group_settings()
group_settings_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
group_settings_page.wait_for_page()
def test_add_students_to_cohort_success(self):
"""
Scenario: When students are added to a cohort, the appropriate notification is shown.
Given I have a course with two cohorts
And there is a user in one cohort
And there is a user in neither cohort
When I add the two users to the cohort that initially had no users
Then there are 2 users in total in the cohort
And I get a notification that 2 users have been added to the cohort
And I get a notification that 1 user was moved from the other cohort
And the user input field is empty
And appropriate events have been emitted
"""
start_time = datetime.now(UTC)
self.cohort_management_page.select_cohort(self.auto_cohort_name)
self.assertEqual(0, self.cohort_management_page.get_selected_cohort_count())
self.cohort_management_page.add_students_to_selected_cohort([self.student_name, self.instructor_name])
# Wait for the number of users in the cohort to change, indicating that the add operation is complete.
EmptyPromise(
lambda: 2 == self.cohort_management_page.get_selected_cohort_count(), 'Waiting for added students'
).fulfill()
confirmation_messages = self.cohort_management_page.get_cohort_confirmation_messages()
self.assertEqual(
[
"2 learners have been added to this cohort.",
"1 learner was moved from " + self.manual_cohort_name
],
confirmation_messages
)
self.assertEqual("", self.cohort_management_page.get_cohort_student_input_field_value())
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.user_added",
"time": {"$gt": start_time},
"event.user_id": {"$in": [int(self.instructor_id), int(self.student_id)]},
"event.cohort_name": self.auto_cohort_name,
}).count(),
2
)
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.user_removed",
"time": {"$gt": start_time},
"event.user_id": int(self.student_id),
"event.cohort_name": self.manual_cohort_name,
}).count(),
1
)
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.user_add_requested",
"time": {"$gt": start_time},
"event.user_id": int(self.instructor_id),
"event.cohort_name": self.auto_cohort_name,
"event.previous_cohort_name": None,
}).count(),
1
)
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.user_add_requested",
"time": {"$gt": start_time},
"event.user_id": int(self.student_id),
"event.cohort_name": self.auto_cohort_name,
"event.previous_cohort_name": self.manual_cohort_name,
}).count(),
1
)
def test_add_students_to_cohort_failure(self):
"""
Scenario: When errors occur when adding students to a cohort, the appropriate notification is shown.
Given I have a course with a cohort and a user already in it
When I add the user already in a cohort to that same cohort
And I add a non-existing user to that cohort
Then there is no change in the number of students in the cohort
And I get a notification that one user was already in the cohort
And I get a notification that one user is unknown
And the user input field still contains the incorrect email addresses
"""
self.cohort_management_page.select_cohort(self.manual_cohort_name)
self.assertEqual(1, self.cohort_management_page.get_selected_cohort_count())
self.cohort_management_page.add_students_to_selected_cohort([self.student_name, "unknown_user"])
# Wait for notification messages to appear, indicating that the add operation is complete.
EmptyPromise(
lambda: 2 == len(self.cohort_management_page.get_cohort_confirmation_messages()), 'Waiting for notification'
).fulfill()
self.assertEqual(1, self.cohort_management_page.get_selected_cohort_count())
self.assertEqual(
[
"0 learners have been added to this cohort.",
"1 learner was already in the cohort"
],
self.cohort_management_page.get_cohort_confirmation_messages()
)
self.assertEqual(
[
"There was an error when trying to add learners:",
"Unknown username: unknown_user"
],
self.cohort_management_page.get_cohort_error_messages()
)
self.assertEqual(
self.student_name + ",unknown_user,",
self.cohort_management_page.get_cohort_student_input_field_value()
)
def _verify_cohort_settings(
self,
cohort_name,
assignment_type=None,
new_cohort_name=None,
new_assignment_type=None,
verify_updated=False
):
"""
Create a new cohort and verify the new and existing settings.
"""
start_time = datetime.now(UTC)
self.assertNotIn(cohort_name, self.cohort_management_page.get_cohorts())
self.cohort_management_page.add_cohort(cohort_name, assignment_type=assignment_type)
self.assertEqual(0, self.cohort_management_page.get_selected_cohort_count())
        # After adding the cohort, it should automatically be selected, and its
        # assignment_type should match the requested type ("manual" when none was
        # given, since that is the default assignment type)
_assignment_type = assignment_type or 'manual'
msg = "Waiting for currently selected cohort assignment type"
EmptyPromise(
lambda: _assignment_type == self.cohort_management_page.get_cohort_associated_assignment_type(), msg
).fulfill()
# Go back to Manage Students Tab
self.cohort_management_page.select_manage_settings()
self.cohort_management_page.add_students_to_selected_cohort([self.instructor_name])
# Wait for the number of users in the cohort to change, indicating that the add operation is complete.
EmptyPromise(
lambda: 1 == self.cohort_management_page.get_selected_cohort_count(), 'Waiting for student to be added'
).fulfill()
self.assertFalse(self.cohort_management_page.is_assignment_settings_disabled)
self.assertEqual('', self.cohort_management_page.assignment_settings_message)
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.created",
"time": {"$gt": start_time},
"event.cohort_name": cohort_name,
}).count(),
1
)
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.creation_requested",
"time": {"$gt": start_time},
"event.cohort_name": cohort_name,
}).count(),
1
)
if verify_updated:
self.cohort_management_page.select_cohort(cohort_name)
self.cohort_management_page.select_cohort_settings()
self.cohort_management_page.set_cohort_name(new_cohort_name)
self.cohort_management_page.set_assignment_type(new_assignment_type)
self.cohort_management_page.save_cohort_settings()
            # If the cohort name is empty, we should see an error message.
if not new_cohort_name:
confirmation_messages = self.cohort_management_page.get_cohort_settings_messages(type='error')
self.assertEqual(
["The cohort cannot be saved", "You must specify a name for the cohort"],
confirmation_messages
)
else:
confirmation_messages = self.cohort_management_page.get_cohort_settings_messages()
self.assertEqual(["Saved cohort"], confirmation_messages)
self.assertEqual(new_cohort_name, self.cohort_management_page.cohort_name_in_header)
self.assertIn(new_cohort_name, self.cohort_management_page.get_cohorts())
self.assertEqual(1, self.cohort_management_page.get_selected_cohort_count())
self.assertEqual(
new_assignment_type,
self.cohort_management_page.get_cohort_associated_assignment_type()
)
def _create_csv_file(self, filename, csv_text_as_lists):
"""
Create a csv file with the provided list of lists.
        :param filename: the name that will be used for the csv file. Its location will
        be under the test upload data directory
        :param csv_text_as_lists: the contents of the csv file in the form of a list of lists
"""
filename = self.instructor_dashboard_page.get_asset_path(filename)
with open(filename, 'w+') as csv_file:
writer = unicodecsv.writer(csv_file)
for line in csv_text_as_lists:
writer.writerow(line)
self.addCleanup(os.remove, filename)
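    # Illustrative note (not part of the original tests): a call such as
    #   self._create_csv_file("example.csv", [["username", "cohort"], ["someuser", "ManualCohort1"]])
    # would write a two-row CSV with unicodecsv under the test upload data directory
    # and register it for cleanup; the filename and row values here are made up for
    # illustration only.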
def _generate_unique_user_data(self):
"""
Produce unique username and e-mail.
"""
unique_username = 'user' + str(uuid.uuid4().hex)[:12]
unique_email = unique_username + "@example.com"
return unique_username, unique_email
def test_add_new_cohort(self):
"""
Scenario: A new manual cohort can be created, and a student assigned to it.
Given I have a course with a user in the course
When I add a new manual cohort to the course via the LMS instructor dashboard
Then the new cohort is displayed and has no users in it
        And assignment type of displayed cohort is "manual" because this is the default
And when I add the user to the new cohort
Then the cohort has 1 user
And appropriate events have been emitted
"""
cohort_name = str(uuid.uuid4().get_hex()[0:20])
self._verify_cohort_settings(cohort_name=cohort_name, assignment_type=None)
def test_add_new_cohort_with_manual_assignment_type(self):
"""
Scenario: A new cohort with manual assignment type can be created, and a student assigned to it.
Given I have a course with a user in the course
        When I add a new cohort with manual assignment type to the course via the LMS instructor dashboard
Then the new cohort is displayed and has no users in it
And assignment type of displayed cohort is "manual"
And when I add the user to the new cohort
Then the cohort has 1 user
And appropriate events have been emitted
"""
cohort_name = str(uuid.uuid4().get_hex()[0:20])
self._verify_cohort_settings(cohort_name=cohort_name, assignment_type='manual')
def test_add_new_cohort_with_random_assignment_type(self):
"""
Scenario: A new cohort with random assignment type can be created, and a student assigned to it.
Given I have a course with a user in the course
        When I add a new cohort with random assignment type to the course via the LMS instructor dashboard
Then the new cohort is displayed and has no users in it
And assignment type of displayed cohort is "random"
And when I add the user to the new cohort
Then the cohort has 1 user
And appropriate events have been emitted
"""
cohort_name = str(uuid.uuid4().get_hex()[0:20])
self._verify_cohort_settings(cohort_name=cohort_name, assignment_type='random')
def test_update_existing_cohort_settings(self):
"""
Scenario: Update existing cohort settings(cohort name, assignment type)
Given I have a course with a user in the course
When I add a new cohort with random assignment type to the course via the LMS instructor dashboard
Then the new cohort is displayed and has no users in it
And assignment type of displayed cohort is "random"
And when I add the user to the new cohort
Then the cohort has 1 user
And appropriate events have been emitted
Then I select the cohort (that you just created) from existing cohorts
        Then I change its name and set its assignment type to "manual"
        Then I save the settings
And cohort with new name is present in cohorts dropdown list
And cohort assignment type should be "manual"
"""
cohort_name = str(uuid.uuid4().get_hex()[0:20])
new_cohort_name = '{old}__NEW'.format(old=cohort_name)
self._verify_cohort_settings(
cohort_name=cohort_name,
assignment_type='random',
new_cohort_name=new_cohort_name,
new_assignment_type='manual',
verify_updated=True
)
def test_update_existing_cohort_settings_with_empty_cohort_name(self):
"""
Scenario: Update existing cohort settings(cohort name, assignment type).
Given I have a course with a user in the course
When I add a new cohort with random assignment type to the course via the LMS instructor dashboard
Then the new cohort is displayed and has no users in it
And assignment type of displayed cohort is "random"
And when I add the user to the new cohort
Then the cohort has 1 user
And appropriate events have been emitted
Then I select a cohort from existing cohorts
        Then I set its name to an empty string and its assignment type to "manual"
And I click on Save button
Then I should see an error message
"""
cohort_name = str(uuid.uuid4().get_hex()[0:20])
new_cohort_name = ''
self._verify_cohort_settings(
cohort_name=cohort_name,
assignment_type='random',
new_cohort_name=new_cohort_name,
new_assignment_type='manual',
verify_updated=True
)
def test_default_cohort_assignment_settings(self):
"""
Scenario: Cohort assignment settings are disabled for default cohort.
Given I have a course with a user in the course
And I have added a manual cohort
And I have added a random cohort
When I select the random cohort
Then cohort assignment settings are disabled
"""
self.cohort_management_page.select_cohort("AutoCohort1")
self.cohort_management_page.select_cohort_settings()
self.assertTrue(self.cohort_management_page.is_assignment_settings_disabled)
message = "There must be one cohort to which students can automatically be assigned."
self.assertEqual(message, self.cohort_management_page.assignment_settings_message)
def test_cohort_enable_disable(self):
"""
Scenario: Cohort Enable/Disable checkbox related functionality is working as intended.
Given I have a cohorted course with a user.
And I can see the `Enable Cohorts` checkbox is checked.
And cohort management controls are visible.
When I uncheck the `Enable Cohorts` checkbox.
Then cohort management controls are not visible.
And When I reload the page.
Then I can see the `Enable Cohorts` checkbox is unchecked.
And cohort management controls are not visible.
"""
self.assertTrue(self.cohort_management_page.is_cohorted)
self.assertTrue(self.cohort_management_page.cohort_management_controls_visible())
self.cohort_management_page.is_cohorted = False
self.assertFalse(self.cohort_management_page.cohort_management_controls_visible())
self.browser.refresh()
self.cohort_management_page.wait_for_page()
self.assertFalse(self.cohort_management_page.is_cohorted)
self.assertFalse(self.cohort_management_page.cohort_management_controls_visible())
def test_link_to_data_download(self):
"""
Scenario: a link is present from the cohort configuration in
the instructor dashboard to the Data Download section.
Given I have a course with a cohort defined
When I view the cohort in the LMS instructor dashboard
There is a link to take me to the Data Download section of the Instructor Dashboard.
"""
self.cohort_management_page.select_data_download()
data_download_page = DataDownloadPage(self.browser)
data_download_page.wait_for_page()
def test_cohort_by_csv_both_columns(self):
"""
Scenario: the instructor can upload a file with user and cohort assignments, using both emails and usernames.
Given I have a course with two cohorts defined
When I go to the cohort management section of the instructor dashboard
I can upload a CSV file with assignments of users to cohorts via both usernames and emails
Then I can download a file with results
And appropriate events have been emitted
"""
csv_contents = [
['username', 'email', 'ignored_column', 'cohort'],
[self.instructor_name, '', 'June', 'ManualCohort1'],
['', self.student_email, 'Spring', 'AutoCohort1'],
[self.other_student_name, '', 'Fall', 'ManualCohort1'],
]
filename = "cohort_csv_both_columns_1.csv"
self._create_csv_file(filename, csv_contents)
self._verify_csv_upload_acceptable_file(filename)
def test_cohort_by_csv_only_email(self):
"""
Scenario: the instructor can upload a file with user and cohort assignments, using only emails.
Given I have a course with two cohorts defined
When I go to the cohort management section of the instructor dashboard
I can upload a CSV file with assignments of users to cohorts via only emails
Then I can download a file with results
And appropriate events have been emitted
"""
csv_contents = [
['email', 'cohort'],
[self.instructor_email, 'ManualCohort1'],
[self.student_email, 'AutoCohort1'],
[self.other_student_email, 'ManualCohort1'],
]
filename = "cohort_csv_emails_only.csv"
self._create_csv_file(filename, csv_contents)
self._verify_csv_upload_acceptable_file(filename)
def test_cohort_by_csv_only_username(self):
"""
Scenario: the instructor can upload a file with user and cohort assignments, using only usernames.
Given I have a course with two cohorts defined
When I go to the cohort management section of the instructor dashboard
I can upload a CSV file with assignments of users to cohorts via only usernames
Then I can download a file with results
And appropriate events have been emitted
"""
csv_contents = [
['username', 'cohort'],
[self.instructor_name, 'ManualCohort1'],
[self.student_name, 'AutoCohort1'],
[self.other_student_name, 'ManualCohort1'],
]
filename = "cohort_users_only_username1.csv"
self._create_csv_file(filename, csv_contents)
self._verify_csv_upload_acceptable_file(filename)
    # TODO: Change unicode_hello_in_korean = u'ßßßßßß' to u'안녕하세요' after upgrading the Chrome driver. See TNL-3944
def test_cohort_by_csv_unicode(self):
"""
Scenario: the instructor can upload a file with user and cohort assignments, using both emails and usernames.
Given I have a course with two cohorts defined
And I add another cohort with a unicode name
When I go to the cohort management section of the instructor dashboard
I can upload a CSV file with assignments of users to the unicode cohort via both usernames and emails
Then I can download a file with results
TODO: refactor events verification to handle this scenario. Events verification assumes movements
between other cohorts (manual and auto).
"""
unicode_hello_in_korean = u'ßßßßßß'
self._verify_cohort_settings(cohort_name=unicode_hello_in_korean, assignment_type=None)
csv_contents = [
['username', 'email', 'cohort'],
[self.instructor_name, '', unicode_hello_in_korean],
['', self.student_email, unicode_hello_in_korean],
[self.other_student_name, '', unicode_hello_in_korean]
]
filename = "cohort_unicode_name.csv"
self._create_csv_file(filename, csv_contents)
self._verify_csv_upload_acceptable_file(filename, skip_events=True)
def _verify_csv_upload_acceptable_file(self, filename, skip_events=None):
"""
Helper method to verify cohort assignments after a successful CSV upload.
When skip_events is specified, no assertions are made on events.
"""
start_time = datetime.now(UTC)
self.cohort_management_page.upload_cohort_file(filename)
self._verify_cohort_by_csv_notification(
u"Your file '{}' has been uploaded. Allow a few minutes for processing.".format(filename)
)
if not skip_events:
# student_user is moved from manual cohort to auto cohort
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.user_added",
"time": {"$gt": start_time},
"event.user_id": {"$in": [int(self.student_id)]},
"event.cohort_name": self.auto_cohort_name,
}).count(),
1
)
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.user_removed",
"time": {"$gt": start_time},
"event.user_id": int(self.student_id),
"event.cohort_name": self.manual_cohort_name,
}).count(),
1
)
# instructor_user (previously unassigned) is added to manual cohort
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.user_added",
"time": {"$gt": start_time},
"event.user_id": {"$in": [int(self.instructor_id)]},
"event.cohort_name": self.manual_cohort_name,
}).count(),
1
)
# other_student_user (previously unassigned) is added to manual cohort
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.user_added",
"time": {"$gt": start_time},
"event.user_id": {"$in": [int(self.other_student_id)]},
"event.cohort_name": self.manual_cohort_name,
}).count(),
1
)
# Verify the results can be downloaded.
data_download = self.instructor_dashboard_page.select_data_download()
data_download.wait_for_available_report()
report = data_download.get_available_reports_for_download()[0]
base_file_name = "cohort_results_"
self.assertIn("{}_{}".format(
'_'.join([self.course_info['org'], self.course_info['number'], self.course_info['run']]), base_file_name
), report)
report_datetime = datetime.strptime(
report[report.index(base_file_name) + len(base_file_name):-len(".csv")],
"%Y-%m-%d-%H%M"
)
self.assertLessEqual(start_time.replace(second=0, microsecond=0), utc.localize(report_datetime))
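        # For reference (explanatory comment, not in the original test): the report
        # name checked above has the rough shape
        #   <org>_<number>_<run>_cohort_results_<YYYY-mm-dd-HHMM>.csv
        # which is why the timestamp suffix can be parsed back with "%Y-%m-%d-%H%M".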
def test_cohort_by_csv_wrong_file_type(self):
"""
Scenario: if the instructor uploads a non-csv file, an error message is presented.
Given I have a course with cohorting enabled
When I go to the cohort management section of the instructor dashboard
And I upload a file without the CSV extension
Then I get an error message stating that the file must have a CSV extension
"""
self.cohort_management_page.upload_cohort_file("image.jpg")
self._verify_cohort_by_csv_notification("The file must end with the extension '.csv'.")
def test_cohort_by_csv_missing_cohort(self):
"""
Scenario: if the instructor uploads a csv file with no cohort column, an error message is presented.
Given I have a course with cohorting enabled
When I go to the cohort management section of the instructor dashboard
And I upload a CSV file that is missing the cohort column
Then I get an error message stating that the file must have a cohort column
"""
self.cohort_management_page.upload_cohort_file("cohort_users_missing_cohort_column.csv")
self._verify_cohort_by_csv_notification("The file must contain a 'cohort' column containing cohort names.")
def test_cohort_by_csv_missing_user(self):
"""
Scenario: if the instructor uploads a csv file with no username or email column, an error message is presented.
Given I have a course with cohorting enabled
When I go to the cohort management section of the instructor dashboard
And I upload a CSV file that is missing both the username and email columns
Then I get an error message stating that the file must have either a username or email column
"""
self.cohort_management_page.upload_cohort_file("cohort_users_missing_user_columns.csv")
self._verify_cohort_by_csv_notification(
"The file must contain a 'username' column, an 'email' column, or both."
)
def _verify_cohort_by_csv_notification(self, expected_message):
"""
Helper method to check the CSV file upload notification message.
"""
# Wait for notification message to appear, indicating file has been uploaded.
EmptyPromise(
lambda: 1 == len(self.cohort_management_page.get_csv_messages()), 'Waiting for notification'
).fulfill()
messages = self.cohort_management_page.get_csv_messages()
self.assertEquals(expected_message, messages[0])
@attr('a11y')
def test_cohorts_management_a11y(self):
"""
Run accessibility audit for cohort management.
"""
self.cohort_management_page.a11y_audit.config.set_rules({
"ignore": [
'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865
'region', # TODO: AC-932
]
})
self.cohort_management_page.a11y_audit.check_for_accessibility_errors()
@attr(shard=6)
class CohortContentGroupAssociationTest(UniqueCourseTest, CohortTestMixin):
"""
Tests for linking between content groups and cohort in the instructor dashboard.
"""
def setUp(self):
"""
Set up a cohorted course with a user_partition of scheme "cohort".
"""
super(CohortContentGroupAssociationTest, self).setUp()
# create course with single cohort and two content groups (user_partition of type "cohort")
self.cohort_name = "OnlyCohort"
self.course_fixture = CourseFixture(**self.course_info).install()
self.setup_cohort_config(self.course_fixture)
self.cohort_id = self.add_manual_cohort(self.course_fixture, self.cohort_name)
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
'Apples, Bananas',
'Content Group Partition',
[Group("0", 'Apples'), Group("1", 'Bananas')],
scheme="cohort"
)
],
},
})
# login as an instructor
self.instructor_name = "instructor_user"
self.instructor_id = AutoAuthPage(
self.browser, username=self.instructor_name, email="instructor_user@example.com",
course_id=self.course_id, staff=True
).visit().get_user_id()
# go to the membership page on the instructor dashboard
self.instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
self.instructor_dashboard_page.visit()
self.cohort_management_page = self.instructor_dashboard_page.select_cohort_management()
def test_no_content_group_linked(self):
"""
Scenario: In a course with content groups, cohorts are initially not linked to a content group
Given I have a course with a cohort defined and content groups defined
When I view the cohort in the instructor dashboard and select settings
Then the cohort is not linked to a content group
And there is no text stating that content groups are undefined
And the content groups are listed in the selector
"""
self.cohort_management_page.select_cohort(self.cohort_name)
self.assertIsNone(self.cohort_management_page.get_cohort_associated_content_group())
self.assertIsNone(self.cohort_management_page.get_cohort_related_content_group_message())
self.assertEquals(["Apples", "Bananas"], self.cohort_management_page.get_all_content_groups())
def test_link_to_content_group(self):
"""
Scenario: In a course with content groups, cohorts can be linked to content groups
Given I have a course with a cohort defined and content groups defined
When I view the cohort in the instructor dashboard and select settings
And I link the cohort to one of the content groups and save
Then there is a notification that my cohort has been saved
And when I reload the page
And I view the cohort in the instructor dashboard and select settings
Then the cohort is still linked to the content group
"""
self._link_cohort_to_content_group(self.cohort_name, "Bananas")
self.assertEqual("Bananas", self.cohort_management_page.get_cohort_associated_content_group())
def test_unlink_from_content_group(self):
"""
Scenario: In a course with content groups, cohorts can be unlinked from content groups
Given I have a course with a cohort defined and content groups defined
When I view the cohort in the instructor dashboard and select settings
And I link the cohort to one of the content groups and save
Then there is a notification that my cohort has been saved
And I reload the page
And I view the cohort in the instructor dashboard and select settings
And I unlink the cohort from any content group and save
Then there is a notification that my cohort has been saved
And when I reload the page
And I view the cohort in the instructor dashboard and select settings
Then the cohort is not linked to any content group
"""
self._link_cohort_to_content_group(self.cohort_name, "Bananas")
self.cohort_management_page.set_cohort_associated_content_group(None)
self._verify_settings_saved_and_reload(self.cohort_name)
self.assertEqual(None, self.cohort_management_page.get_cohort_associated_content_group())
def test_create_new_cohort_linked_to_content_group(self):
"""
Scenario: In a course with content groups, a new cohort can be linked to a content group
at time of creation.
Given I have a course with a cohort defined and content groups defined
When I create a new cohort and link it to a content group
Then when I select settings I see that the cohort is linked to the content group
And when I reload the page
And I view the cohort in the instructor dashboard and select settings
Then the cohort is still linked to the content group
"""
new_cohort = "correctly linked cohort"
self._create_new_cohort_linked_to_content_group(new_cohort, "Apples")
self.browser.refresh()
self.cohort_management_page.wait_for_page()
self.cohort_management_page.select_cohort(new_cohort)
self.assertEqual("Apples", self.cohort_management_page.get_cohort_associated_content_group())
def test_missing_content_group(self):
"""
Scenario: In a course with content groups, if a cohort is associated with a content group that no longer
exists, a warning message is shown
Given I have a course with a cohort defined and content groups defined
When I create a new cohort and link it to a content group
And I delete that content group from the course
And I reload the page
And I view the cohort in the instructor dashboard and select settings
Then the settings display a message that the content group no longer exists
And when I select a different content group and save
Then the error message goes away
"""
new_cohort = "linked to missing content group"
self._create_new_cohort_linked_to_content_group(new_cohort, "Apples")
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
'Apples, Bananas',
'Content Group Partition',
[Group("2", 'Pears'), Group("1", 'Bananas')],
scheme="cohort"
)
],
},
})
self.browser.refresh()
self.cohort_management_page.wait_for_page()
self.cohort_management_page.select_cohort(new_cohort)
self.assertEqual("Deleted Content Group", self.cohort_management_page.get_cohort_associated_content_group())
self.assertEquals(
["Bananas", "Pears", "Deleted Content Group"],
self.cohort_management_page.get_all_content_groups()
)
self.assertEqual(
"Warning:\nThe previously selected content group was deleted. Select another content group.",
self.cohort_management_page.get_cohort_related_content_group_message()
)
self.cohort_management_page.set_cohort_associated_content_group("Pears")
confirmation_messages = self.cohort_management_page.get_cohort_settings_messages()
self.assertEqual(["Saved cohort"], confirmation_messages)
self.assertIsNone(self.cohort_management_page.get_cohort_related_content_group_message())
self.assertEquals(["Bananas", "Pears"], self.cohort_management_page.get_all_content_groups())
def _create_new_cohort_linked_to_content_group(self, new_cohort, cohort_group):
"""
Creates a new cohort linked to a content group.
"""
self.cohort_management_page.add_cohort(new_cohort, content_group=cohort_group)
self.assertEqual(cohort_group, self.cohort_management_page.get_cohort_associated_content_group())
def _link_cohort_to_content_group(self, cohort_name, content_group):
"""
Links a cohort to a content group. Saves the changes and verifies the cohort updated properly.
Then refreshes the page and selects the cohort.
"""
self.cohort_management_page.select_cohort(cohort_name)
self.cohort_management_page.set_cohort_associated_content_group(content_group)
self._verify_settings_saved_and_reload(cohort_name)
def _verify_settings_saved_and_reload(self, cohort_name):
"""
Verifies the confirmation message indicating that a cohort's settings have been updated.
Then refreshes the page and selects the cohort.
"""
confirmation_messages = self.cohort_management_page.get_cohort_settings_messages()
self.assertEqual(["Saved cohort"], confirmation_messages)
self.browser.refresh()
self.cohort_management_page.wait_for_page()
self.cohort_management_page.select_cohort(cohort_name)
|
jolyonb/edx-platform
|
common/test/acceptance/tests/discussion/test_cohort_management.py
|
Python
|
agpl-3.0
| 42,473
|
[
"VisIt"
] |
d46c2d349d7dd823ffc1c1f519c82a5e83751dcdb4cdf6f4fd7cc84c3e21cb62
|
from __future__ import print_function
from builtins import range
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import random
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def cv_cars_gbm():
# read in the dataset and construct training set (and validation set)
cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
    # choose the type of model-building exercise (regression, binomial, or
    # multinomial classification). 0:regression, 1:binomial, 2:multinomial
problem = 1 #random.sample(list(range(3)),1)[0]
# pick the predictors and response column, along with the correct distribution
predictors = ["displacement","power","weight","acceleration","year"]
if problem == 1 :
response_col = "economy_20mpg"
distribution = "bernoulli"
cars[response_col] = cars[response_col].asfactor()
elif problem == 2 :
response_col = "cylinders"
distribution = "multinomial"
cars[response_col] = cars[response_col].asfactor()
else :
response_col = "economy"
distribution = "gaussian"
print("Distribution: {0}".format(distribution))
print("Response column: {0}".format(response_col))
## cross-validation
# 1. check that cv metrics are the same over repeated "Modulo" runs
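    # Explanatory note (not part of the original test): with fold_assignment="Modulo",
    # row i is placed deterministically in fold i % nfolds, so repeated runs with the
    # same nfolds build identical folds and therefore identical CV models; "Random"
    # fold assignment draws a fresh split on every run, which is why the models are
    # expected to differ in step 2 below.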
nfolds = random.randint(3,10)
gbm1 = H2OGradientBoostingEstimator(nfolds=nfolds,
distribution=distribution,
ntrees=5,
fold_assignment="Modulo")
gbm1.train(x=predictors, y=response_col, training_frame=cars)
gbm2 = H2OGradientBoostingEstimator(nfolds=nfolds,
distribution=distribution,
ntrees=5,
fold_assignment="Modulo")
gbm2.train(x=predictors, y=response_col, training_frame=cars)
pyunit_utils.check_models(gbm1, gbm2, True)
# 2. check that cv metrics are different over repeated "Random" runs
nfolds = random.randint(3,10)
gbm1 = H2OGradientBoostingEstimator(nfolds=nfolds,
distribution=distribution,
ntrees=5,
fold_assignment="Random")
gbm1.train(x=predictors, y=response_col, training_frame=cars)
gbm2 = H2OGradientBoostingEstimator(nfolds=nfolds,
distribution=distribution,
ntrees=5,
fold_assignment="Random")
gbm2.train(x=predictors, y=response_col, training_frame=cars)
try:
pyunit_utils.check_models(gbm1, gbm2, True)
assert False, "Expected models to be different over repeated Random runs"
except AssertionError:
assert True
# 3. folds_column
num_folds = random.randint(2,5)
fold_assignments = h2o.H2OFrame([[random.randint(0,num_folds-1)] for f in range(cars.nrow)])
fold_assignments.set_names(["fold_assignments"])
cars = cars.cbind(fold_assignments)
gbm = H2OGradientBoostingEstimator(distribution=distribution,
ntrees=5,
keep_cross_validation_models=True,
keep_cross_validation_predictions=True)
gbm.train(x=predictors, y=response_col, training_frame=cars, fold_column="fold_assignments")
num_cv_models = len(gbm._model_json['output']['cross_validation_models'])
assert num_cv_models==num_folds, "Expected {0} cross-validation models, but got " \
"{1}".format(num_folds, num_cv_models)
cv_model1 = h2o.get_model(gbm._model_json['output']['cross_validation_models'][0]['name'])
cv_model2 = h2o.get_model(gbm._model_json['output']['cross_validation_models'][1]['name'])
# 4. keep_cross_validation_predictions
cv_predictions = gbm1._model_json['output']['cross_validation_predictions']
assert cv_predictions is None, "Expected cross-validation predictions to be None, but got {0}".format(cv_predictions)
cv_predictions = gbm._model_json['output']['cross_validation_predictions']
assert len(cv_predictions)==num_folds, "Expected the same number of cross-validation predictions " \
"as folds, but got {0}".format(len(cv_predictions))
## boundary cases
# 1. nfolds = number of observations (leave-one-out cross-validation)
gbm = H2OGradientBoostingEstimator(nfolds=cars.nrow, distribution=distribution,ntrees=5,
fold_assignment="Modulo")
gbm.train(x=predictors, y=response_col, training_frame=cars)
# 2. nfolds = 0
gbm1 = H2OGradientBoostingEstimator(nfolds=0, distribution=distribution, ntrees=5)
gbm1.train(x=predictors, y=response_col,training_frame=cars)
# check that this is equivalent to no nfolds
gbm2 = H2OGradientBoostingEstimator(distribution=distribution, ntrees=5)
gbm2.train(x=predictors, y=response_col, training_frame=cars)
pyunit_utils.check_models(gbm1, gbm2)
# 3. cross-validation and regular validation attempted
gbm = H2OGradientBoostingEstimator(nfolds=random.randint(3,10),
ntrees=5,
distribution=distribution)
gbm.train(x=predictors, y=response_col, training_frame=cars, validation_frame=cars)
## error cases
# 1. nfolds == 1 or < 0
try:
gbm = H2OGradientBoostingEstimator(nfolds=random.sample([-1,1],1)[0],
ntrees=5,
distribution=distribution)
gbm.train(x=predictors, y=response_col, training_frame=cars)
assert False, "Expected model-build to fail when nfolds is 1 or < 0"
except EnvironmentError:
assert True
# 2. more folds than observations
try:
gbm = H2OGradientBoostingEstimator(nfolds=cars.nrow+1,
distribution=distribution,
ntrees=5,
fold_assignment="Modulo")
gbm.train(x=predictors, y=response_col, training_frame=cars)
assert False, "Expected model-build to fail when nfolds > nobs"
except EnvironmentError:
assert True
# 3. fold_column and nfolds both specified
try:
gbm = H2OGradientBoostingEstimator(nfolds=3, ntrees=5, distribution=distribution)
gbm.train(x=predictors, y=response_col, training_frame=cars, fold_column="fold_assignments")
assert False, "Expected model-build to fail when fold_column and nfolds both specified"
except EnvironmentError:
assert True
# 4. fold_column and fold_assignment both specified
try:
gbm = H2OGradientBoostingEstimator(ntrees=5, fold_assignment="Random", distribution=distribution)
gbm.train(x=predictors, y=response_col, training_frame=cars, fold_column="fold_assignments")
assert False, "Expected model-build to fail when fold_column and fold_assignment both specified"
except EnvironmentError:
assert True
if __name__ == "__main__":
pyunit_utils.standalone_test(cv_cars_gbm)
else:
cv_cars_gbm()
|
h2oai/h2o-3
|
h2o-py/tests/testdir_algos/gbm/pyunit_cv_cars_gbm.py
|
Python
|
apache-2.0
| 7,143
|
[
"Gaussian"
] |
fbb607408cb7f80195976a7d498026f73f7b1542d2e50021a9652534eaaa6aaf
|
"""This is the main file you run to start a pinball machine."""
# mpf.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
from datetime import datetime
import socket
import os
from optparse import OptionParser
import errno
import version
import sys
from mpf.system.machine import MachineController
# Allow command line options to do things
# We use optparse instead of argparse so python 2.6 works
parser = OptionParser()
parser.add_option("-C", "--mpfconfigfile",
action="store", type="string", dest="mpfconfigfile",
default=os.path.join("mpf", "mpfconfig.yaml"),
help="The MPF framework config file")
parser.add_option("-c", "--configfile",
action="store", type="string", dest="configfile",
default="config.yaml",
help="Specifies the location of the first machine config "
"file")
parser.add_option("-l", "--logfile",
action="store", type="string", dest="logfile",
default=os.path.join("logs", datetime.now().strftime(
"%Y-%m-%d-%H-%M-%S-mpf-" + socket.gethostname() + ".log")),
help="Specifies the name (and path) of the log file")
parser.add_option("-v", "--verbose",
action="store_const", dest="loglevel", const=logging.DEBUG,
default=logging.INFO, help="Enables verbose logging to the "
"log file")
parser.add_option("-V", "--verboseconsole",
action="store_true", dest="consoleloglevel",
default=logging.INFO,
help="Enables verbose logging to the console. Do NOT on "
"Windows platforms")
parser.add_option("-o", "--optimized",
action="store_true", dest="optimized", default=False,
help="Enables performance optimized game loop")
parser.add_option("-x", "--nohw",
action="store_false", dest="physical_hw", default=True,
help="Specifies physical game hardware is not connected")
parser.add_option("--versions",
action="store_true", dest="version", default=False,
help="Shows the MPF version and quits")
(options, args) = parser.parse_args()
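# For comparison only (illustrative, not used by this script, which keeps optparse
# for Python 2.6 compatibility), the argparse equivalent of one of these options
# would look roughly like:
#   parser = argparse.ArgumentParser()
#   parser.add_argument("-c", "--configfile", default="config.yaml",
#                       help="Specifies the location of the first machine config file")
#   options = parser.parse_args()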
options_dict = vars(options) # convert the values instance to python dict
# if --version was passed, print the version and quit
if options_dict['version']:
print "Mission Pinball Framework version:", version.__version__
print "Requires Config File version:", version.__config_version__
sys.exit()
# add the first positional argument into the options dict as the machine path
try:
options_dict['machinepath'] = args[0]
except IndexError:
print "Error: You need to specify the path to your machine_files folder "\
"for the game you want to run."
sys.exit()
# Configure logging. Creates a logfile and logs to the console.
# Formatting options are documented here:
# https://docs.python.org/2.7/library/logging.html#logrecord-attributes
try:
os.makedirs('logs')
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
logging.basicConfig(level=options.loglevel,
format='%(asctime)s : %(levelname)s : %(name)s : %(message)s',
filename=options.logfile,
filemode='w')
# define a Handler which writes messages at the console log level or higher to sys.stderr
console = logging.StreamHandler()
console.setLevel(options.consoleloglevel)
# set a format which is simpler for console use
formatter = logging.Formatter('%(levelname)s : %(name)s : %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
def main():
try:
machine = MachineController(options_dict)
machine.run()
logging.info("MPF run loop ended.")
except Exception, e:
logging.exception(e)
sys.exit()
if __name__ == '__main__':
main()
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
|
jabdoa2/mpf
|
mpf.py
|
Python
|
mit
| 5,347
|
[
"Brian"
] |
0bc37921bab5c90011e0185a17cb0ed8b779f3747ed49aae5379ee62a60fcd7d
|
from setuptools import setup
setup(
name = "GPclust",
version = "0.1.0",
author = "James Hensman",
author_email = "james.hensman@sheffield.ac.uk",
url = "http://staffwww.dcs.sheffield.ac.uk/people/J.Hensman/gpclust.html",
description = ("Clustering of time series using Gaussian processes and variational Bayes"),
license = "GPL v3",
keywords = " clustering Gaussian-process machine-learning",
download_url = 'https://github.com/jameshensman/gpclust/tarball/0.1',
packages=['GPclust'],
install_requires=['GPy>=0.6'],
classifiers=[]
)
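# Typical usage (illustrative, not part of the original file): install from a source
# checkout with "pip install ." or build a source distribution with "python setup.py sdist".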
|
jameshensman/GPclust
|
setup.py
|
Python
|
gpl-3.0
| 583
|
[
"Gaussian"
] |
3be3cf22c2897160cdb4e12642992a3625d51263c3ead1013440eb69163d53bc
|
import numpy as np
import os
from os.path import join as pjoin
from numpy.testing import assert_raises, assert_array_equal
from tempfile import mktemp
import nibabel as nib
from surfer import Brain
from surfer import io, utils
from surfer.utils import requires_fsaverage
from mayavi import mlab
subj_dir = utils._get_subjects_dir()
subject_id = 'fsaverage'
std_args = [subject_id, 'lh', 'inflated']
data_dir = pjoin(os.path.split(__file__)[0], '..', '..',
'examples', 'example_data')
small_brain = dict(size=100)
def has_freesurfer():
if 'FREESURFER_HOME' not in os.environ:
return False
else:
return True
requires_fs = np.testing.dec.skipif(not has_freesurfer(),
'Requires FreeSurfer command line tools')
@requires_fsaverage
def test_offscreen():
"""Test offscreen rendering
"""
mlab.options.backend = 'auto'
brain = Brain(*std_args, offscreen=True)
shot = brain.screenshot()
assert_array_equal(shot.shape, (800, 800, 3))
@requires_fsaverage
def test_image():
"""Test image saving
"""
mlab.options.backend = 'auto'
brain = Brain(*std_args, config_opts=small_brain)
tmp_name = mktemp() + '.png'
brain.save_image(tmp_name)
brain.save_imageset(tmp_name, ['med', 'lat'], 'jpg')
brain.save_montage(tmp_name, ['l', 'v', 'm'], orientation='v')
brain.screenshot()
brain.close()
@requires_fsaverage
def test_brains():
"""Test plotting of Brain with different arguments
"""
# testing backend breaks when passing in a figure, so we use 'auto' here
# (shouldn't affect usability, but it makes testing more annoying)
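    # (Explanatory note, not in the original tests: most tests below set the backend
    # to 'test', which drives Mayavi with a null rendering engine so no display is
    # needed; 'auto' is used only where a real figure or screenshot is required, as
    # in this test and the image/animation tests.)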
mlab.options.backend = 'auto'
surfs = ['inflated', 'sphere']
hemis = ['lh', 'rh']
curvs = [True, False]
titles = [None, 'Hello']
config_opts = [{}, dict(size=(800, 800))]
figs = [None, mlab.figure()]
subj_dirs = [None, subj_dir]
for surf, hemi, curv, title, co, fig, sd \
in zip(surfs, hemis, curvs, titles, config_opts, figs, subj_dirs):
brain = Brain(subject_id, hemi, surf, curv, title, co, fig, sd)
brain.close()
assert_raises(ValueError, Brain, subject_id, 'lh', 'inflated',
subjects_dir='')
@requires_fsaverage
def test_annot():
"""Test plotting of annot
"""
mlab.options.backend = 'test'
annots = ['aparc', 'aparc.a2005s']
borders = [True, False]
alphas = [1, 0.5]
brain = Brain(*std_args)
for a, b, p in zip(annots, borders, alphas):
brain.add_annotation(a, b, p)
brain.close()
@requires_fsaverage
def test_contour():
"""Test plotting of contour overlay
"""
mlab.options.backend = 'test'
brain = Brain(*std_args)
overlay_file = pjoin(data_dir, "lh.sig.nii.gz")
brain.add_contour_overlay(overlay_file)
brain.add_contour_overlay(overlay_file, max=20, n_contours=9,
line_width=2)
brain.contour['surface'].actor.property.line_width = 1
brain.contour['surface'].contour.number_of_contours = 10
brain.close()
@requires_fsaverage
@requires_fs
def test_data():
"""Test plotting of data
"""
mlab.options.backend = 'test'
brain = Brain(*std_args)
mri_file = pjoin(data_dir, 'resting_corr.nii.gz')
reg_file = pjoin(data_dir, 'register.dat')
surf_data = io.project_volume_data(mri_file, "lh", reg_file)
brain.add_data(surf_data, -.7, .7, colormap="jet", alpha=.7)
brain.close()
@requires_fsaverage
def test_foci():
"""Test plotting of foci
"""
mlab.options.backend = 'test'
brain = Brain(*std_args)
coords = [[-36, 18, -3],
[-43, 25, 24],
[-48, 26, -2]]
brain.add_foci(coords, map_surface="white", color="gold")
annot_path = pjoin(subj_dir, subject_id, 'label', 'lh.aparc.a2009s.annot')
ids, ctab, names = nib.freesurfer.read_annot(annot_path)
verts = np.arange(0, len(ids))
coords = np.random.permutation(verts[ids == 74])[:10]
scale_factor = 0.7
brain.add_foci(coords, coords_as_verts=True,
scale_factor=scale_factor, color="#A52A2A")
brain.close()
@requires_fsaverage
def test_label():
"""Test plotting of label
"""
mlab.options.backend = 'test'
subject_id = "fsaverage"
hemi = "lh"
surf = "smoothwm"
brain = Brain(subject_id, hemi, surf)
brain.add_label("BA1")
brain.add_label("BA1", color="blue", scalar_thresh=.5)
label_file = pjoin(subj_dir, subject_id,
"label", "%s.MT.label" % hemi)
brain.add_label(label_file)
brain.add_label("BA44", borders=True)
brain.add_label("BA6", alpha=.7)
brain.show_view("medial")
brain.add_label("V1", color="steelblue", alpha=.6)
brain.add_label("V2", color="#FF6347", alpha=.6)
brain.add_label("entorhinal", color=(.2, 1, .5), alpha=.6)
brain.close()
@requires_fsaverage
def test_meg_inverse():
"""Test plotting of MEG inverse solution
"""
mlab.options.backend = 'test'
brain = Brain(*std_args)
stc_fname = os.path.join(data_dir, 'meg_source_estimate-lh.stc')
stc = io.read_stc(stc_fname)
data = stc['data']
vertices = stc['vertices']
time = 1e3 * np.linspace(stc['tmin'],
stc['tmin'] + data.shape[1] * stc['tstep'],
data.shape[1])
colormap = 'hot'
time_label = 'time=%0.2f ms'
brain.add_data(data, colormap=colormap, vertices=vertices,
smoothing_steps=10, time=time, time_label=time_label)
brain.set_data_time_index(2)
brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)
# viewer = TimeViewer(brain)
brain.close()
@requires_fsaverage
def test_morphometry():
"""Test plotting of morphometry
"""
mlab.options.backend = 'test'
brain = Brain(*std_args)
brain.add_morphometry("curv")
brain.add_morphometry("sulc", grayscale=True)
brain.add_morphometry("thickness")
brain.close()
@requires_fsaverage
def test_overlay():
"""Test plotting of overlay
"""
mlab.options.backend = 'test'
# basic overlay support
overlay_file = pjoin(data_dir, "lh.sig.nii.gz")
brain = Brain(*std_args)
brain.add_overlay(overlay_file)
brain.overlays["sig"].remove()
brain.add_overlay(overlay_file, min=5, max=20, sign="pos")
sig1 = io.read_scalar_data(pjoin(data_dir, "lh.sig.nii.gz"))
sig2 = io.read_scalar_data(pjoin(data_dir, "lh.alt_sig.nii.gz"))
thresh = 4
sig1[sig1 < thresh] = 0
sig2[sig2 < thresh] = 0
conjunct = np.min(np.vstack((sig1, sig2)), axis=0)
brain.add_overlay(sig1, 4, 30, name="sig1")
brain.overlays["sig1"].pos_bar.lut_mode = "Reds"
brain.overlays["sig1"].pos_bar.visible = False
brain.add_overlay(sig2, 4, 30, name="sig2")
brain.overlays["sig2"].pos_bar.lut_mode = "Blues"
brain.overlays["sig2"].pos_bar.visible = False
brain.add_overlay(conjunct, 4, 30, name="conjunct")
brain.overlays["conjunct"].pos_bar.lut_mode = "Purples"
brain.overlays["conjunct"].pos_bar.visible = False
brain.close()
@requires_fsaverage
def test_probabilistic_labels():
"""Test plotting of probabilistic labels
"""
mlab.options.backend = 'test'
brain = Brain("fsaverage", "lh", "inflated",
config_opts=dict(cortex="low_contrast"))
brain.add_label("BA1", color="darkblue")
brain.add_label("BA1", color="dodgerblue", scalar_thresh=.5)
brain.add_label("BA45", color="firebrick", borders=True)
brain.add_label("BA45", color="salmon", borders=True, scalar_thresh=.5)
label_file = pjoin(subj_dir, "fsaverage", "label", "lh.BA6.label")
prob_field = np.zeros_like(brain._geo.x)
ids, probs = io.read_label(label_file, read_scalars=True)
prob_field[ids] = probs
brain.add_data(prob_field, thresh=1e-5)
brain.data["colorbar"].number_of_colors = 10
brain.data["colorbar"].number_of_labels = 11
brain.close()
@requires_fsaverage
def test_text():
"""Test plotting of text
"""
mlab.options.backend = 'test'
brain = Brain(*std_args)
brain.add_text(0.1, 0.1, 'Hello', 'blah')
brain.close()
@requires_fsaverage
def test_animate():
"""Test animation
"""
mlab.options.backend = 'auto'
brain = Brain(*std_args, config_opts=small_brain)
brain.add_morphometry('curv')
tmp_name = mktemp() + '.avi'
brain.animate(["m"] * 3, n_steps=2)
brain.animate(['l', 'l'], n_steps=2, fname=tmp_name)
# can't rotate in axial plane
assert_raises(ValueError, brain.animate, ['l', 'd'])
brain.close()
@requires_fsaverage
def test_views():
"""Test showing different views
"""
mlab.options.backend = 'test'
brain = Brain(*std_args)
brain.show_view('lateral')
brain.show_view('m')
brain.show_view('rostral')
brain.show_view('caudal')
brain.show_view('ve')
brain.show_view('frontal')
brain.show_view('par')
brain.show_view('dor')
brain.show_view({'distance': 432})
brain.show_view({'azimuth': 135, 'elevation': 79}, roll=107)
brain.close()
|
aestrivex/PySurfer
|
surfer/tests/test_viz.py
|
Python
|
bsd-3-clause
| 9,130
|
[
"Mayavi"
] |
263ef0d56edc819183684bcb33b0123a30907de9c613a8439e50a44078b987e4
|
# Copyright 2021 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test parsing a reset action."""
import io
import textwrap
from launch.actions import ResetLaunchConfigurations, SetLaunchConfiguration
from launch.frontend import Parser
from launch.launch_context import LaunchContext
def test_reset():
yaml_file = \
"""\
launch:
- let:
name: 'foo'
value: 'FOO'
- let:
name: 'bar'
value: 'BAR'
- reset:
keep:
- name: 'bar'
value: $(var bar)
- name: 'baz'
value: 'BAZ'
""" # noqa: E501
print('Load YAML')
yaml_file = textwrap.dedent(yaml_file)
print('Load Parser')
root_entity, parser = Parser.load(io.StringIO(yaml_file))
print('Parse Description')
ld = parser.parse_description(root_entity)
assert isinstance(ld.entities[0], SetLaunchConfiguration)
assert isinstance(ld.entities[1], SetLaunchConfiguration)
assert isinstance(ld.entities[2], ResetLaunchConfigurations)
lc = LaunchContext()
assert len(lc.launch_configurations) == 0
ld.entities[0].visit(lc)
ld.entities[1].visit(lc)
assert len(lc.launch_configurations) == 2
assert 'foo' in lc.launch_configurations.keys()
assert lc.launch_configurations['foo'] == 'FOO'
assert 'bar' in lc.launch_configurations.keys()
assert lc.launch_configurations['bar'] == 'BAR'
ld.entities[2].visit(lc)
assert 'foo' not in lc.launch_configurations.keys()
assert 'bar' in lc.launch_configurations.keys()
assert lc.launch_configurations['bar'] == 'BAR'
assert 'baz' in lc.launch_configurations.keys()
assert lc.launch_configurations['baz'] == 'BAZ'
if __name__ == '__main__':
test_reset()
|
ros2/launch
|
launch_yaml/test/launch_yaml/test_reset.py
|
Python
|
apache-2.0
| 2,408
|
[
"VisIt"
] |
5de241c108962f30f059988f898ec9159c4983cf909578e0f5140317ddfc5f6b
|
from __future__ import division, print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from os import path
from re import match, findall, sub
import pysces
from sympy import Symbol, sympify
from datetime import datetime
# File reading/validation functions
def get_term_types_from_raw_data(raw_data_dict):
"""
Determines the types of terms defined for ThermoKin based on the
file contents. This allows for generation of latex expressions
based on these terms.
Parameters
----------
raw_data_dict : dict of str:{str:str}
Returns
-------
set of str
"""
term_types = set()
for v in raw_data_dict.values():
for k in v.keys():
term_types.add(k)
return term_types
class FormatException(Exception):
pass
def read_reqn_file(path_to_file):
"""
Reads the contents of a file and returns it as a list of lines.
Parameters
----------
path_to_file : str
        Path to the file that is to be read in
Returns
-------
list of str
The file contents as separate strings in a list
"""
with open(path_to_file) as f:
lines = f.readlines()
return lines
def get_terms(raw_lines, term_type):
"""
Takes a list of strings and returns a new list containing only
lines starting with `term_type` and strips line endings.
    A term can be either of the "main" (`!T`) type or of the additional
    (`!G`) type.
Parameters
----------
raw_lines : list of str
List of lines from a '.reqn' file.
term_type : str
This string specifies the type of term.
Returns
-------
list of str
"""
assert term_type == '!T' or term_type == '!G', 'Invalid term type specified'
valid_prefix_lines = [line for line in raw_lines if line.startswith(term_type)]
no_line_endings = []
for line in valid_prefix_lines:
if line[-1] == '\n':
no_line_endings.append(line[:-1])
else:
no_line_endings.append(line)
return no_line_endings
def check_term_format(lines, term_type):
"""
Inspects a list of string for the correct ThermoKin syntax. Returns
`True` in case of correct format. Throws exception otherwise.
Correct format is a str matching the pattern "X{\w*}{\w*} .*" . Where
"X" is either "!G" or "!T" as specified by `term_type`.
Parameters
----------
lines : list of str
Clean list of lines from a '.reqn' file.
term_type : str
This string specifies the type of term.
Returns
-------
bool
"""
assert term_type == '!T' or term_type == '!G', 'Invalid term type specified'
errors_in = []
for i, line in enumerate(lines):
if not match(term_type + '{\w*}{\w*} .*', line):
errors_in.append(str(i))
if len(errors_in) == 0:
return True
else:
error_str = ', '.join(errors_in)
raise FormatException('Incorrect syntax in lines:' + error_str)
def construct_dict(lines):
"""
Constructs a dictionary of dictionaries for each reaction.
    Here the keys of the outer dictionary are reaction name strings, while
    the inner dictionary keys are the term names. The inner dictionary
    values are the term expressions.
Parameters
----------
lines : list of str
Returns
-------
dict of str:{str:str}
"""
outer_dict = {}
for line in lines:
in_brackets = findall('(?<={)\w+', line)
r_name = in_brackets[0]
t_name = in_brackets[1]
expr = findall('(?<=\w} ).*', line)[0]
inner_dict = {t_name: expr}
if r_name in outer_dict:
outer_dict[r_name].update(inner_dict)
else:
outer_dict[r_name] = inner_dict
return outer_dict
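# Illustrative example (not in the original module): given the cleaned line
#   '!T{R1}{ma} k1*S1 - k2*P1'
# construct_dict returns {'R1': {'ma': 'k1*S1 - k2*P1'}}; a second '!T{R1}{...}' line
# would be merged into the same inner dict under the 'R1' key.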
def get_subs_dict(expression, mod):
"""
    Builds a substitution dictionary for an expression based on the
    values of its symbols in a model.
Parameters
----------
expression : sympy expression
mod : PysMod
Returns
-------
    dict of str:float
"""
subs_dict = {}
symbols = expression.atoms(Symbol)
for symbol in symbols:
attr = str(symbol)
subs_dict[attr] = getattr(mod, attr)
return subs_dict
def get_reqn_path(mod):
"""
    Gets the default path and filename of `.reqn` files belonging to a model
The `.reqn` files which contain rate equations split into different
(arbitrary) components should be saved in the same directory as the model
file itself by default. It should have the same filename (sans extension)
as the model file.
Parameters
----------
mod : PysMod
A pysces model which has corresponding `.reqn` file saved in the same
directory with the same file name as the model file.
Returns
-------
str
        A string with the path and filename of the `.reqn` file.
"""
fname = mod.ModelFile
dot_loc = fname.find('.')
fname_min_ext = fname[:dot_loc]
fname_ext = fname_min_ext + '.reqn'
return path.join(mod.ModelDir, fname_ext)
def get_term_dict(raw_lines, term_type):
"""
Returns the term dictionary from a list of raw lines from a file.
    The contents of a '.reqn' file are read and passed to this function.
    Here the contents are parsed, terms of the requested type are extracted,
    and the result is returned as a dict of str:{str:str}.
    Parameters
    ----------
    raw_lines : list of str
        List of lines from a '.reqn' file.
    term_type : str
        This string specifies the type of term ('!T' or '!G').
Returns
-------
dict of str:{str:str}
"""
clean_terms = get_terms(raw_lines, term_type)
if check_term_format(clean_terms, term_type):
term_dict = construct_dict(clean_terms)
return term_dict
def get_all_terms(path_to_read):
raw_lines = read_reqn_file(path_to_read)
main_terms = get_term_dict(raw_lines, '!T')
add_terms = get_term_dict(raw_lines, '!G')
return main_terms, add_terms
# File writing/validation functions
def get_str_formulas(mod):
"""
Returns a dictionary with reaction_name:string_formula as key:value
pairs.
Goes through mod.reactions and constructs a dictionary where
reaction_name is the key and mod.reaction_name.formula is the
value.
Parameters
----------
mod : PysMod
The model which will be used to construct the dictionary
Returns
-------
dict of str:str
A dictionary with reaction_name:string_formula as
key:value pairs
"""
string_formulas = {}
for reaction in mod.reactions:
string_formulas[reaction] = getattr(mod, reaction).formula
return string_formulas
def replace_pow(str_formulas):
"""
Creates new dict from an existing dict with "pow(x,y)" in values
replaced with "x**y".
    Goes through the values of a dictionary and uses a regex to convert
the pysces internal syntax for powers with standard python syntax.
This is needed before conversion to sympy expressions. This use case
requires reaction names as they appear in pysces as keys.
Parameters
----------
str_formulas : dict of str:str
A dictionary where the values as contain pysces format strings
representing rate equation expressions with powers in the syntax
"pow(x,y)"
Returns
-------
dict of str:str
A new dictionary with str rate equations where powers are
represented by standard python syntax e.g. x**y
"""
new_str_formulas = {}
for k, v in str_formulas.items():
new_str_formulas[k] = sub(r'pow\((\S*?),(\S*?)\)', r'\1**\2', v)
return new_str_formulas
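# Illustrative example (not in the original module):
#   replace_pow({'R1': 'k1*pow(S1,2)'})  ->  {'R1': 'k1*S1**2'}
# Note that the regex expects no whitespace inside pow(...), which the
# PySCeS-generated formula strings appear to follow.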
def get_sympy_formulas(str_formulas):
"""
Converts dict with str values to sympy expression values.
Used to convert key:string_formula to key:sympy_formula. Intended
use case is for automatic separation of rate equation terms into
mass action and binding terms. This use case requires reaction
names as they appear in pysces as keys.
Parameters
----------
str_formulas : dict of str:str
Dictionary with str values that represent reaction expressions.
This dictionary needs to have already passed through all
sanitising functions/methods (e.g. `replace_pow`).
Returns
-------
dict with sympy_expression values and original keys
Dictionary where values are symbolic sympy expressions
"""
return {k: sympify(v) for (k, v) in list(str_formulas.items())}
def get_sympy_terms(sympy_formulas):
"""
Converts a dict with sympy expressions as values to a new dict with
list values containing either the original expression or a negative
and a positive expressions.
This is used to separate reversible and irreversible reactions.
Reversible reactions will have two terms, one negative and one
positive. Here expressions are expanded and split into terms and
tested for the above criteria: If met the dict value will be a list
of two expressions, each representing a term of the rate equation.
Otherwise the dict value will be a list with a single item - the
original expression. This use case requires reaction names as they
appear in pysces as keys.
Parameters
----------
sympy_formulas : dict of str:sympy expression values
Dictionary with values representing rate equations as sympy
expressions. Keys are reaction names
Returns
-------
dict of str:list sympy expression
        Each list will have either one item (the original dict
        value) or two items (the original dict value split into a
        negative and a positive expression).
See Also
--------
check_for_negatives
"""
sympy_terms = {}
for name, formula in sympy_formulas.items():
terms = formula.expand().as_coeff_add()[1]
if len(terms) == 2 and check_for_negatives(terms):
sympy_terms[name] = terms
else:
sympy_terms[name] = [formula.factor()]
return sympy_terms
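# Illustrative example (not in the original module): a reversible rate such as
# Vf*S/Km - Vr*P/Km expands into one positive and one negative term, so its value
# becomes that two-term tuple; an irreversible rate such as Vf*S/(Km + S) stays a
# one-element list holding the (factorised) original expression.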
def get_ma_terms(mod, sympy_terms):
"""
Returns dict with reaction names as keys and mass action terms as
values from a dict with reaction names as keys and lists of sympy
expressions as values.
Only reversible reactions are handled. Any list in the ``sympy_terms``
dict that does not have a length of 2 will be ignored.
Parameters
----------
mod : PysMod
The model from which the `sympy_terms` dict was originally
constructed.
sympy_terms: dict of str:list of sympy expressions
This dictionary should be created by `get_sympy_terms`.
Returns
-------
dict of str:sympy expression
        Each value will be the mass action term for the corresponding
        reversible reaction; irreversible reactions are omitted.
See Also
--------
get_st_pt_keq
get_sympy_terms
sort_terms
"""
model_map = pysces.ModelMap(mod) # model map to get substrates, products
# and parameters for each reaction
messages = {}
ma_terms = {}
for name, terms in sympy_terms.items():
reaction_map = getattr(model_map, name)
substrates = [sympify(substrate) for substrate in
reaction_map.hasSubstrates()]
products = [sympify(product) for product in reaction_map.hasProducts()]
if len(terms) == 2: # condition for reversible reactions
# make sure negative term is second in term list
terms = sort_terms(terms)
# divide pos term by neg term and factorise
expressions = (-terms[0] / terms[1]).factor()
            # get substrate, product and keq terms (and outcome message)
st, pt, keq, message = get_st_pt_keq(expressions, substrates,
products)
if all([st, pt, keq]):
ma_terms[name] = st - pt / keq
messages[name] = message
else:
            messages[name] = 'rate equation not included - irreversible or unknown form'
return ma_terms, messages
def get_st_pt_keq(expression, substrates, products):
"""
Takes an expression representing "substrates/products *
Keq_expression" and returns substrates, products and keq_expression
separately.
Parameters
----------
expression : sympy expression
The expression containing "substrates/products * Keq_expression"
substrates : list of sympy symbols
List with symbolic representations for each substrate involved
in the reaction which `expression` represents.
products : list of sympy symbols
List with symbolic representations for each product involved in
the reaction which `expression` represents.
Returns
-------
    tuple of sympy expressions and str
        This tuple contains sympy expressions for the substrates,
        products and keq_expression in that order. The final value is a
        str message describing the outcome of the separation.
See Also
--------
st_pt_keq_from_expression
"""
res = st_pt_keq_from_expression(expression,
substrates,
products)
subs_term, prod_term, keq, message = res
return subs_term, prod_term, keq, message
def st_pt_keq_from_expression(expression, substrates, products,
failure_threshold=10):
"""
    Takes an expression representing "substrates/products *
    Keq_expression" and returns substrates, products and keq_expression
    separately.
    In this strategy there is no inspection of the stoichiometry as
    provided by the model map. Here the expression is
    divided/multiplied by each substrate/product until it no longer
    appears in the expression. If the substrates or products are not
    removed after a defined number of attempts, the separation fails
    and zero values are returned together with a failure message.
    This is a fallback for cases where the
    defined stoichiometry does not correspond to the actual rate
    equation.
    Cases where a substrate/product does not appear in the rate
    equation at all are also treated as failures.
Parameters
----------
expression : sympy expression
The expression containing "substrates/products * Keq_expression"
substrates : list of sympy symbols
List with symbolic representations for each substrate involved
in the reaction which `expression` represents.
products : list of sympy symbols
List with symbolic representations for each product involved in
the reaction which `expression` represents.
failure_threshold : int, optional (Default: 10)
        A threshold value that defines the number of times the metabolite
removal strategy should be tried before failure.
Returns
-------
    tuple of sympy expressions and str
        This tuple contains sympy expressions for the substrates,
        products and keq_expression in that order, followed by a status
        message. On failure the three expressions are returned as zeros.
"""
new_expression = expression
subs_term = 1
prod_term = 1
fail = False
message = 'successful separation of rate equation terms'
# Remove substrates from expression by division
# Each division multiplies subs_term with substrate
for substrate in substrates:
# divide expr by subs while subs in expr
if substrate not in new_expression.atoms(Symbol):
fail = True
message = 'failure: substrate %s not in rate equation' % str(
substrate)
break
tries = 0
while substrate in new_expression.atoms(Symbol):
new_expression = new_expression / substrate
subs_term *= substrate
tries += 1
if tries > failure_threshold:
message = 'failure: cannot remove substrate %s from rate equation' % str(
substrate)
fail = True
break
if fail:
break
# Same as above but for products
# Product removed by multiplication
if not fail:
for product in products:
if product not in new_expression.atoms(Symbol):
fail = True
message = 'failure: product %s not in rate equation' % str(
product)
break
tries = 0
while product in new_expression.atoms(Symbol):
new_expression = new_expression * product
prod_term *= product
tries += 1
if tries > failure_threshold:
message = 'failure: cannot remove product %s from rate equation' % str(
product)
fail = True
break
if fail:
break
keq = new_expression.subs({1.0: 1})
if fail:
return 0, 0, 0, message
else:
return subs_term, prod_term, keq, message
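# --- Illustrative sketch (added for clarity; the expression is hypothetical):
# for the ratio of the positive and negative terms of a reversible rate law,
# e.g. S*Keq/P, the helper strips substrates by division and products by
# multiplication, leaving the Keq expression behind.
def _example_st_pt_keq_from_expression():
    expr = sympify('S*Keq/P')
    S, P = sympify('S'), sympify('P')
    # expected: (S, P, Keq, 'successful separation of rate equation terms')
    return st_pt_keq_from_expression(expr, [S], [P])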
def sort_terms(terms):
"""
    Returns a tuple of two sympy expressions where the first expression
    is positive and the second expression is negative.
Parameters
----------
terms : list of sympy expressions
A list with length of 2 where one element is positive and
the other is negative (starts with a minus symbol)
Returns
-------
tuple of sympy expressions
A tuple where the first element is positive and the
second is negative.
"""
neg = None
pos = None
for term in terms:
if str(term)[0] == '-': # negative terms should start with a '-'
neg = term
else:
pos = term
assert neg, 'No negative terms ' + str(terms)
assert pos, 'No positive terms ' + str(terms)
return pos, neg
def get_binding_vc_terms(sympy_formulas, ma_terms):
"""
Returns dictionary with a combined "rate capacity" and "binding"
term as values.
Uses the symbolic rate equations dictionary and mass action term
dictionaries to construct a new dictionary with "rate capacity-
binding" terms. The symbolic rate equations are divided by their
mass action terms. The results are the "rate capacity-binding"
terms. This use case requires reaction names as they appear in
pysces as keys for both dictionaries.
Parameters
----------
sympy_formulas : dict of str:sympy expression
Full rate equations for all reactions in model. Keys are
reaction names and correspond to this in `ma_terms`.
ma_terms : dict of str:sympy expression
Mass action terms for all reactions in model. Keys are reaction
names and correspond to this in `sympy_formulas`.
Returns
-------
dict of str:sympy expression
A dictionary with reaction names as keys and sympy
expressions representing "rate capacity-binding"
terms as values.
"""
binding_terms = {}
for name, ma_term in ma_terms.items():
binding_terms[name] = (sympy_formulas[name] / ma_term).factor().factor()
return binding_terms
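# --- Illustrative sketch (added for clarity; the rate law is hypothetical):
# dividing the full rate equation by its mass action term leaves the combined
# "rate capacity-binding" factor.
def _example_get_binding_vc_terms():
    full = {'R1': sympify('Vf*(S - P/Keq)/(1 + S + P)')}
    ma = {'R1': sympify('S - P/Keq')}
    # expected: {'R1': Vf/(1 + S + P)} (up to sympy's automatic reordering)
    return get_binding_vc_terms(full, ma)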
def check_for_negatives(terms):
"""
    Returns `True` if a list of sympy expressions contains any
    expressions that are negative.
Parameters
----------
terms : list of sympy expressions
A list where expressions may be either positive or negative.
Returns
-------
bool
`True` if any negative terms in expression. Otherwise
`False`
"""
any_negs = False
for term in terms:
if str(term)[0] == '-':
any_negs = True
return any_negs
def create_reqn_data(mod):
string_formulas = get_str_formulas(mod)
string_formulas = replace_pow(string_formulas)
sympy_formulas = get_sympy_formulas(string_formulas)
sympy_terms = get_sympy_terms(sympy_formulas)
non_irr = filter_irreversible(sympy_terms)
gamma_keq_terms, _ = get_gamma_keq_terms(mod, non_irr)
ma_terms, messages = get_ma_terms(mod, sympy_terms)
binding_vc_terms = get_binding_vc_terms(sympy_formulas, ma_terms)
return ma_terms, binding_vc_terms, gamma_keq_terms, messages
def create_gamma_keq_reqn_data(mod):
string_formulas = get_str_formulas(mod)
string_formulas = replace_pow(string_formulas)
sympy_formulas = get_sympy_formulas(string_formulas)
sympy_terms = get_sympy_terms(sympy_formulas)
sympy_terms = filter_irreversible(sympy_terms)
gamma_keq, messages = get_gamma_keq_terms(mod, sympy_terms)
return gamma_keq, messages
def get_gamma_keq_terms(mod, sympy_terms):
model_map = pysces.ModelMap(mod) # model map to get substrates, products
# and parameters for each reaction
messages = {}
gamma_keq_terms = {}
for name, terms in sympy_terms.items():
reaction_map = getattr(model_map, name)
substrates = [sympify(substrate) for substrate in
reaction_map.hasSubstrates()]
products = [sympify(product) for product in reaction_map.hasProducts()]
if len(terms) == 2: # condition for reversible reactions
# make sure negative term is second in term list
terms = sort_terms(terms)
# divide pos term by neg term and factorise
expressions = (-terms[0] / terms[1]).factor()
            # get substrate, product and keq terms (and outcome message)
st, pt, keq, _ = get_st_pt_keq(expressions, substrates,
products)
if all([st, pt, keq]):
gamma_keq_terms[name] = pt / (keq*st)
messages[name] = 'successful generation of gamma/keq term'
else:
messages[name] = 'generation of gamma/keq term failed'
return gamma_keq_terms, messages
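# Note (added for clarity, using the same hypothetical reversible rate law
# k1*S - k2*P as in the sketches above): with st = S, pt = P and keq = k1/k2,
# the stored value pt/(keq*st) = k2*P/(k1*S) is the mass-action ratio divided
# by the equilibrium constant (Gamma/Keq), which equals 1 at equilibrium.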
def filter_irreversible(sympy_terms):
new_sympy_terms = {}
for k, v in sympy_terms.items():
if len(v) == 2:
new_sympy_terms[k] = v
return new_sympy_terms
def write_reqn_file(file_name, model_name, ma_terms, vc_binding_terms, gamma_keq_terms, messages):
already_written = []
date = datetime.strftime(datetime.now(), '%H:%M:%S %d-%m-%Y')
with open(file_name, 'w') as f:
f.write('# Automatically parsed and split rate equations for model: %s\n' % model_name)
f.write('# generated on: %s\n\n' % date)
f.write('# Note that this is a best effort attempt that is highly dependent\n')
f.write('# on the form of the rate equations as defined in the model file.\n')
f.write('# Check correctness before use.\n\n')
for reaction_name, ma_term in ma_terms.items():
already_written.append(reaction_name)
f.write('# %s :%s\n' % (reaction_name, messages[reaction_name]))
f.write('!T{%s}{ma} %s\n' % (reaction_name, ma_term))
f.write('!T{%s}{bind_vc} %s\n' % (
reaction_name, vc_binding_terms[reaction_name]))
f.write('!G{%s}{gamma_keq} %s\n' % (reaction_name, gamma_keq_terms[reaction_name]))
f.write('\n')
for k, v in messages.items():
if k not in already_written:
f.write('# %s :%s\n' % (k, v))
def term_to_file(file_name, expression, parent_name=None, term_name=None ):
date = datetime.strftime(datetime.now(), '%H:%M:%S %d-%m-%Y')
if not parent_name:
parent_name = 'undefined'
if not term_name:
term_name = 'undefined'
with open(file_name,'a') as f:
f.write('\n')
f.write('# Additional term appended on %s\n' % date)
if 'undefined' in (term_name,parent_name):
print('Warning: writing partially defined term to %s. Please inspect file for further details.' % file_name)
f.write('# The term below is partially defined - fix term manually by defining reaction and term names\n')
f.write('!G{%s}{%s} %s\n' % (parent_name,
term_name,
expression))
# These functions are not used anymore
#
# def get_gamma_keq_terms(mod, sympy_terms):
# model_map = pysces.ModelMap(mod) # model map to get substrates, products
# # and parameters for each reaction
#
# messages = {}
# gamma_keq_terms = {}
# for name, terms in sympy_terms.iteritems():
# reaction_map = getattr(model_map, name)
#
# substrates = [sympify(substrate) for substrate in
# reaction_map.hasSubstrates()]
#
# products = [sympify(product) for product in reaction_map.hasProducts()]
#
# if len(terms) == 2: # condition for reversible reactions
# # make sure negative term is second in term list
# terms = sort_terms(terms)
# # divide pos term by neg term and factorise
# expressions = (-terms[0] / terms[1]).factor()
# # get substrate, product and keq terms (and strategy)
# st, pt, keq, _ = get_st_pt_keq(expressions, substrates,
# products)
# if all([st, pt, keq]):
# gamma_keq_terms[name] = pt / (keq*st)
# messages[name] = 'successful generation of gamma/keq term'
# else:
# messages[name] = 'generation of gamma/keq term failed'
#
# return gamma_keq_terms, messages
#
# def create_gamma_keq_reqn_data(mod):
# string_formulas = get_str_formulas(mod)
# string_formulas = replace_pow(string_formulas)
# sympy_formulas = get_sympy_formulas(string_formulas)
# sympy_terms = get_sympy_terms(sympy_formulas)
# sympy_terms = filter_irreversible(sympy_terms)
# gamma_keq, messages = get_gamma_keq_terms(mod, sympy_terms)
# return gamma_keq, messages
#
# def get_irr_ma(expression, parameters, substrates, stoichiometry):
# """
# Returns a mass action expression for an irreversible reaction (which
# simply consists of substrates).
#
# Here two strategies are tried - if both fail, the answer from the
# first strategy is used. For details refer to functions mentioned
# under `See Also`.
#
# Parameters
# ----------
# expression : sympy expression
# A sympy expression representing a rate equation of an
# irreversible reaction.
# parameters : list of sympy symbols
# List with symbolic representations for each parameter involved
# in the reaction which `expression` represents.
# substrates : list of sympy symbols
# List with symbolic representations for each substrate involved
# in the reaction which `expression` represents.
# stoichiometry : dict of sympy.Symbol:float
# Symbolic representations of the substrates and products are used
# for the keys of this dict while the stoichiometric coefficient
# values are floats.
#
#
# Returns
# -------
# tuple of sympy expression and int
# Symbolic expression for the mass action term of the irreversible
# reaction and an integer indicating the strategy used.
#
# See Also
# --------
# irr_ma_from_coeffs
# irr_ma_from_expression
# """
# # strategy 1
# strategy = 1
# substrate_term = irr_ma_from_coeffs(substrates, stoichiometry)
# valid = validate_irr_ma(expression, substrate_term)
# if not valid:
# # fallback strategy
# strategy = 2
# final_fallback = substrate_term
# substrate_term = irr_ma_from_expression(expression, parameters)
# # complete failure
# if not substrate_term:
# strategy = 3
# substrate_term = final_fallback
#
# return substrate_term, strategy
#
#
# def irr_ma_from_coeffs(substrates, stoichiometry):
# """
# Returns a mass action expression for an irreversible reaction (which
# simply consists of substrates).
#
# In this strategy the stoichiometric coefficients are used to
# construct the substrate terms. Here an invalid substrate term can be
# produced when the rate equation does not follow the stoichiometry
# as defined in the model and the answer has to be validated using
# `validate_irr_ma`.
#
# Parameters
# ----------
# substrates : list of sympy symbols
# List with symbolic representations for each substrate involved
# in the reaction.
# stoichiometry : dict of sympy.Symbol:float
# Symbolic representations of the substrates and products are used
# for the keys of this dict while the stoichiometric coefficient
# values are floats.
#
#
# Returns
# -------
# sympy expression
# A symbolic expression of the substrate term of a mass action
# expression for an irreversible reaction constructed using
# stoichiometric coefficients.
#
#
# """
# return build_metabolite_term(substrates, stoichiometry)
#
#
# def irr_ma_from_expression(expression, parameters, failure_threshold=10):
# """
# Returns a mass action expression for an irreversible reaction (which
# simply consists of substrates).
#
# In this strategy there is no inspection of the stoichiometry as
# provided by the model map. Here the expressions is divided or
# multiplied by each parameter that initially appears in the
# expression until it does not appear in the expression. If the
# parameter is not removed after a defined number of attempts a total
# failure occurs and this function returns `None`. This is a fallback
# for cases where defined stoichiometry does not correspond to the
# actual rate equation.
#
# Parameters
# ----------
# expression : sympy expression
# A sympy expression representing a rate equation of an
# irreversible reaction.
# parameters : list of sympy symbols
# List with symbolic representations for each parameter involved
# in the reaction which `expression` represents.
# failure_threshold : int, optional (Default: 10)
# A threshold value the defines the number of times the parameter
# removal strategy should be tried before failure.
#
# Returns
# -------
# sympy expression or None
# A symbolic expression of the substrate term of a mass action
# expression for an irreversible reaction constructed the rate
# equation and parameters. None is returned in case of failure
# """
# expression_num = fraction(expression.expand())[0]
# reset_point = expression_num
# fail = False
# for parameter in parameters:
# tries = 0
# switch_strat = False
# while parameter in expression_num.atoms(Symbol):
# expression_num = (expression_num / parameter).factor()
# tries += 1
# if tries > failure_threshold:
# switch_strat = True
# break
#
# if switch_strat:
# expression_num = reset_point
# tries = 0
# while parameter in expression_num.atoms(Symbol):
# expression_num = (expression_num * parameter).factor()
# tries += 1
# if tries > failure_threshold:
# fail = True
# break
#
# if fail:
# break
# reset_point = expression_num
# if fail:
# return None
# else:
# return expression_num
#
#
# def validate_irr_ma(expression, substrate_term):
# """
# Returns `True` when the substrates in the substrates term has the same
# number of coefficients as in the rate equation numerator.
#
# In theory an expanded rate equation expression numerator of an
# irreversible reaction should consist of only parameters and
# substrates. Therefore, division of this numerator by the substrate
# term should yield an expression without any substrates.
#
# Parameters
# ----------
# expression : sympy expression
# A sympy expression representing a rate equation of an
# irreversible reaction
# substrate_term : sympy expression
# A sympy expression representing the substrate (mass action) term
# of an irreversible reaction
#
# Returns
# -------
# boolean
# `True` for valid substrate term, otherwise `False`.
# """
# expression_num = fraction(expression.expand())[0]
# remainder = expression_num / substrate_term
# subs_atoms = substrate_term.atoms(Symbol)
# valid = True
# for remainder_atom in remainder.atoms(Symbol):
# if remainder_atom in subs_atoms:
# valid = False
# return valid
#
# def build_metabolite_term(met_list, stoichiometry):
# """
# Given a list of metabolites and a dict with stoichiometry, this
# function returns a metabolite term for a mass action expression.
#
# Parameters
# ----------
# met_list : list of sympy.Symbol
# List of symbolic representations of metabolites
# (either products or substrates) that appear in a reaction.
# stoichiometry : dict of sympy.Symbol:float
# Symbolic representations of the metabolites are used as the keys
# of this dict while the stoichiometric coefficients are floats
#
# Returns
# -------
# sympy expression
# A symbolic expression of the metabolite term of a mass action
# expression constructed using stoichiometric coefficients.
#
# See Also
# --------
# st_pt_keq_from_coeffs
# """
# met_term = 1
# for met in met_list:
# met_term *= met ** stoichiometry[met]
#
# met_term = met_term.subs({1.0: 1})
# return met_term
#
# def st_pt_keq_from_coeffs(expression, substrates, products, stoichiometry):
# """
# Takes an expression representing "substrates/products *
# Keq_expression" and returns substrates, products and keq_expression
# separately.
#
# In this strategy the stoichiometric coefficients are used to
# construct the substrate, product and Keq terms. Here an invalid Keq
# expression can be produced when the rate equation does not follow
# the stoichiometry as defined in the model and the answer has to be
# validated using `validate_keq_expression`.
#
# Parameters
# ----------
# expression : sympy expression
# The expression containing "substrates/products * Keq_expression"
# substrates : list of sympy symbols
# List with symbolic representations for each substrate involved
# in the reaction which `expression` represents.
# products : list of sympy symbols
# List with symbolic representations for each product involved in
# the reaction which `expression` represents.
# stoichiometry : dict of sympy.Symbol:float
# Symbolic representations of the substrates and products are used
# for the keys of this dict while the stoichiometric coefficients
# are floats.
#
# Returns
# -------
# tuple of sympy_expressions with length of 3
# This tuple contains sympy expressions for the substrates,
# products and keq_expression in that order
#
# See Also
# --------
# get_st_pt_keq
# st_pt_keq_from_expression
# build_metabolite_term
#
# """
# subs_term = build_metabolite_term(substrates, stoichiometry)
# prod_term = build_metabolite_term(products, stoichiometry)
# keq = ((expression / subs_term) * prod_term).factor().subs({1.0: 1})
# return subs_term, prod_term, keq
#
# def validate_keq_expression(expression, substrates, products):
# """
# Returns `True` when an expression does not contain any products
# or substrates.
#
# A valid Keq expression is either a single parameter representing the
# Keq or it consists of parameters (maybe some variables) which
# represents the Keq. There are no substrates or products of the
# reaction in the Keq expression.
#
# Parameters
# ----------
# expression : sympy expression
# A symbolic expression representing the Keq. May be valid or
# invalid.
# substrates : list of sympy.Symbol
# List of symbols for substrates involved in the reaction for
# which `expression` is the Keq expression.
# products : list of sympy.Symbol
# List of symbols for products involved in the reaction for
# which `expression` is the Keq expression.
#
# Returns
# -------
# bool
# True for valid Keq expression, False if invalid.
#
# See Also
# --------
# st_pt_keq_from_coeffs
# """
# valid = True
# expression_symbols = expression.atoms(Symbol)
# for metabolite in substrates + products:
# if metabolite in expression_symbols:
# valid = False
# return valid
|
PySCeS/PyscesToolbox
|
psctb/analyse/_thermokin_file_tools.py
|
Python
|
bsd-3-clause
| 36,903
|
[
"PySCeS"
] |
c2862f4cae32939526b05e67067a70a0314dab395b5ac98a2717be7c2b844de6
|
#!/usr/bin/env python
# this script checks if any piRNA sequences BLAST to the TE sequences with varying percent identities
# first 8 bases of piRNA must match
# USE: piBLAST.py
import re
import sys
import os
from subprocess import Popen, PIPE
from collections import defaultdict
from collections import Counter
import pickle
import itertools
pi_IN="/lscr2/andersenlab/kml436/git_repos2/Transposons2/files/WB_piRNA_positions.gff"
reference="/lscr2/andersenlab/kml436/sv_sim2/c_elegans.PRJNA13758.WS245.genomic.fa"
pi_fasta="/lscr2/andersenlab/kml436/git_repos2/Transposons2/files/piRNAs.fasta"
TE_consensus="/lscr2/andersenlab/kml436/git_repos2/Transposons2/files/SET2/round2_consensus_set2.fasta"
family_renames="/lscr2/andersenlab/kml436/git_repos2/Transposons2/files/round2_WB_familes_set2.txt"
# put shortened WB family names into a dictionary
renames={}
with open(family_renames, 'r') as IN:
for line in IN:
line=line.rstrip('\n')
items=re.split('\t',line)
element,family=items[0:2]
renames[element]=family
# make blast database of TE sequences if it doesn't already exist
if not os.path.isfile("TE_database.nsq"):
cmd="/lscr2/andersenlab/kml436/ncbi-blast-2.2.30+/bin/makeblastdb -in {TE_consensus} -dbtype nucl -out TE_database".format(**locals())
result, err = Popen([cmd],stdout=PIPE, stderr=PIPE, shell=True).communicate()
else:
print "BLAST database already exists, continuing..."
# blast piRNA sequences to TE sequences
cmd="/lscr2/andersenlab/kml436/ncbi-blast-2.2.30+/bin/blastn -db TE_database -query {pi_fasta} -evalue .1 -word_size 5 -outfmt '6 qseqid sseqid pident qlen length mismatch gapopen evalue bitscore qstart qend btop' -max_target_seqs 100 -out piRNA_blast.txt -num_threads 10".format(**locals())
result, err = Popen([cmd],stdout=PIPE, stderr=PIPE, shell=True).communicate()
OUT=open("piRNA_blast_strict_redundant.txt", 'w')
with open("piRNA_blast.txt".format(**locals()) ,'r') as IN:
for line in IN:
line=line.rstrip()
items=re.split('\t',line)
TE=items[1]
query_start=int(items[9])
btop=items[11]
btop_nums=re.findall('\d+', btop)
first_digit=int(btop_nums[0])
if TE in renames.keys():
TE=renames[TE]
items[1]=TE
new_line='\t'.join(items[0:])
#if query_start==1 : #and first_digit>=8
OUT.write(new_line + '\n')
OUT.close()
cmd="cat piRNA_blast_strict_redundant.txt |sort -k1,1 -k2,2 -k10,10 -k11,11r -k8,8 -k9,9 -k3,3r |uniq > piRNA_blast_strict_21.txt" #| awk '$11>20 {print $0}'
result, err = Popen([cmd],stdout=PIPE, stderr=PIPE, shell=True).communicate()
seen={}
OUT=open("summary_mismatches_BLAST_strict.txt", 'w')
OUT.write("Number of Mismatches\tNumber Unique piRNAs Aligned to One TE\tNumber Unique piRNAs Aligned to Multiple TEs\n")
BLAST_PAIRS=open("blast_pairs.txt", 'w')
mis_per= {'zero': 100, 'one': 95.23,'two': 90.48, 'three': 85.71,'four':80.95,'five':76.19}
num_ver= {'zero': 0, 'one': 1,'two': 2, 'three': 3, 'four':4, 'five':5}
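# Note (added for clarity): the percent-identity cutoffs above correspond to
# n mismatches over a 21 nt piRNA alignment, i.e. 100*(21-n)/21 (one mismatch
# -> 20/21 ~ 95.24%, listed here as 95.23). The filtering below uses the
# explicit mismatch count (21 - matched bases) rather than percent identity.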
def piblast(mismatch):
blasts={}
blasts=defaultdict(list)
pi_one=0
pi_multiple=0
with open("piRNA_blast_strict_21.txt", 'r') as IN:
for line in IN:
print line
line=line.rstrip('\n')
items=re.split('\t',line)
query,TE,perID=items[0:3]
match = re.search("(?:Pseudogene|Transcript|sequence_name|^Name)(?:=|:)([\w|\d]+.\d+)", query) #just pull gene name, remove splice info
pi_transcript =match.group(1)
perID=items[2]
beat_per=mis_per[mismatch]
length_align=items[10]
info_align=items[11]
matches=re.split("\D+",info_align)
matches=[int(i) for i in matches]
matched_bases=sum(matches)
print matched_bases
num=num_ver[mismatch]
print num
family_short=re.sub("_CE$","",TE)
family_short=re.sub("WBTransposon","WBT",family_short)
pair=family_short + "_" + pi_transcript
actual_mismatch=21-int(matched_bases)
print actual_mismatch
if actual_mismatch<=num:
#if float(perID)>=beat_per:
if pair not in seen.keys():
blasts[family_short].append(pi_transcript)
BLAST_PAIRS.write("{pi_transcript}\t{family_short}\t{num}\n".format(**locals()))
seen[pair]=mismatch
blasts_TEs_strict = len(blasts.keys())
vals=list(itertools.chain(blasts.values()))
blasts_pis_strict =len(set(list(itertools.chain.from_iterable(vals))))
#ditct[TE].pi,pi,pi
blasts_pis_new=list(itertools.chain.from_iterable(vals))
pi_counts=Counter(blasts_pis_new)
print pi_counts
for k,v in pi_counts.items():
if v==1:
pi_one+=1
else:
pi_multiple+=1
#OUT.write("{mismatch}\t{blasts_pis_strict}\t{blasts_TEs_strict}\n".format(**locals()))
OUT.write("{mismatch}\t{pi_one}\t{pi_multiple}\n".format(**locals()))
with open("strict_blasts_{mismatch}.txt".format(**locals()), "wb") as fp: # Pickle
pickle.dump(blasts, fp)
piblast('zero')
piblast('one')
piblast('two')
piblast('three')
piblast('four')
piblast('five')
#for k,v in seen.items():
# print k
# print v
OUT.close()
BLAST_PAIRS.close()
|
klaricch/Transposons2
|
scripts/piBLAST.py
|
Python
|
mit
| 4,911
|
[
"BLAST"
] |
b8c92789a48fefafc59303fd11104cea5059e01fa8b567f50a612ddb76fd30b0
|
import math
import numpy
import numpy.ma
import datetime
from scipy.interpolate import RegularGridInterpolator
import matplotlib.pyplot as plt
from antpat.io.feko_ffe import FEKOffe, FEKOffeRequest
from .pntsonsphere import sph2crtISO
class TVecFields(object):
"""Provides a tangetial vector function on a spherical grid. The
coordinates (theta,phi) should be in radians. The vector components
can be either in polar spherical basis or in Ludwig3."""
def __init__(self, *args):
if len(args) > 0:
self._full_init(*args)
def _full_init(self, thetaMsh, phiMsh, F1, F2, R=None, basisType='polar'):
self.R = R
self.thetaMsh = thetaMsh # Assume thetaMsh is repeated columns
# (unique axis=0)
self.phiMsh = phiMsh # Assume thetaMsh is repeated rows (uniq. axis=1)
if basisType == 'polar':
self.Fthetas = F1
self.Fphis = F2
elif basisType == 'Ludwig3':
# For now convert Ludwig3 components to polar spherical.
self.Fthetas, self.Fphis = Ludwig32sph(self.phiMsh, F1, F2)
else:
raise RuntimeError("Error: Unknown basisType {}".format(basisType))
def load_ffe(self, filename, request=None):
ffefile = FEKOffe(filename)
if request is None:
if len(ffefile.Requests) == 1:
request = ffefile.Requests.pop()
else:
raise RuntimeError("File contains multiple FFs (specify one): "
+ ','.join(ffefile.Requests))
ffereq = ffefile.Request[request]
self.R = numpy.array(ffereq.freqs)
self.thetaMsh = numpy.deg2rad(ffereq.theta)
self.phiMsh = numpy.deg2rad(ffereq.phi)
nrRs = len(self.R)
self.Fthetas = numpy.zeros((nrRs, ffereq.stheta, ffereq.sphi),
dtype=complex)
self.Fphis = numpy.zeros((nrRs, ffereq.stheta, ffereq.sphi),
dtype=complex)
# Maybe this could be done better?
# Convert list over R of arrays over theta,phi to array
# over R, theta, phi
for ridx in range(nrRs):
self.Fthetas[ridx, :, :] = ffereq.etheta[ridx]
self.Fphis[ridx, :, :] = ffereq.ephi[ridx]
# Remove redundant azimuth endpoint 2*pi
if ffereq.phi[0, 0] == 0. and ffereq.phi[0, -1] == 360.:
self.thetaMsh = numpy.delete(self.thetaMsh, -1, 1)
self.phiMsh = numpy.delete(self.phiMsh, -1, 1)
self.Fthetas = numpy.delete(self.Fthetas, -1, 2)
self.Fphis = numpy.delete(self.Fphis, -1, 2)
def save_ffe(self, filename, request='FarField', source='Unknown'):
""" """
ffefile = FEKOffe()
ffefile.ftype = 'Far Field'
ffefile.fformat = '3'
ffefile.source = source
ffefile.date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# Setup request
ffereq = FEKOffeRequest(request)
if self.R is not None:
freqs = self.R
else:
freqs = [0.0]
ffereq.theta = numpy.rad2deg(self.thetaMsh)
ffereq.phi = numpy.rad2deg(self.phiMsh)
coord = 'Spherical'
stheta = ffereq.theta.shape[0]
sphi = ffereq.phi.shape[1]
rtype = 'Gain'
for ridx in range(len(freqs)):
ffereq._add_head(freqs[ridx], coord, stheta, sphi, rtype)
if self.R is not None:
ffereq.etheta.append(self.Fthetas[ridx, :, :].squeeze())
ffereq.ephi.append(self.Fphis[ridx, :, :].squeeze())
gtheta = numpy.abs(self.Fthetas[ridx, :, :].squeeze())**2
gphi = numpy.abs(self.Fphis[ridx, :, :].squeeze())**2
else:
ffereq.etheta.append(self.Fthetas)
ffereq.ephi.append(self.Fphis)
gtheta = numpy.abs(self.Fthetas)**2
gphi = numpy.abs(self.Fphis)**2
gtotal = gtheta + gphi
ffereq.gtheta.append(gtheta)
ffereq.gphi.append(gphi)
ffereq.gtotal.append(gtotal)
# Add redundant azimuth endpoint 2*pi ?
ffefile.Requests.add(request)
ffefile.Request[request] = ffereq
ffefile.write(filename)
def scale(self, scalefac):
"""Scale Fphis and Fthetas by a multiplicative scale factor scalefac.
"""
self.Fthetas = scalefac * self.Fthetas
self.Fphis = scalefac * self.Fphis
def getthetas(self):
return self.thetaMsh
def getphis(self):
return self.phiMsh
def getFthetas(self, Rval=.0):
Rind = self.getRind(Rval)
if Rind is None:
return self.Fthetas
else:
return numpy.squeeze(self.Fthetas[Rind, ...])
def getFphis(self, Rval=0.):
Rind = self.getRind(Rval)
if Rind is None:
return self.Fphis
else:
return numpy.squeeze(self.Fphis[Rind, ...])
def getFgridAt(self, R):
return (self.getFthetas(R), self.getFphis(R))
def getRs(self):
return self.R
def getRind(self, Rval):
if self.R is None or type(self.R) is float:
return None
r_idx = (numpy.abs(self.R-Rval)).argmin()
return r_idx
def getFalong(self, theta_ub, phi_ub, Rval=None):
"""Get vector field for the given direction."""
thetadomshp = theta_ub.shape
# phidomshp = phi_ub.shape
outshp = thetadomshp
theta_ub = theta_ub.flatten()
phi_ub = phi_ub.flatten()
(theta, phi) = putOnPrincBranch(theta_ub, phi_ub)
thetaphiAxis, F_th_prdc, F_ph_prdc = periodifyRectSphGrd(
self.thetaMsh, self.phiMsh, self.Fthetas, self.Fphis
)
if type(self.R) is not float:
(rM, thetaM) = numpy.meshgrid(Rval, theta, indexing='ij')
(rM, phiM) = numpy.meshgrid(Rval, phi, indexing='ij')
rthetaphi = numpy.zeros(rM.shape+(3,))
rthetaphi[:, :, 0] = rM
rthetaphi[:, :, 1] = thetaM
rthetaphi[:, :, 2] = phiM
rthetaphiAxis = (self.R,)+thetaphiAxis
outshp = (len(Rval),)+outshp
else:
rthetaphi = numpy.array([theta, phi]).T
rthetaphiAxis = thetaphiAxis
F_th_intrpf = RegularGridInterpolator(rthetaphiAxis, F_th_prdc)
F_th = F_th_intrpf(rthetaphi)
F_ph_intrpf = RegularGridInterpolator(rthetaphiAxis, F_ph_prdc)
F_ph = F_ph_intrpf(rthetaphi)
F_th = F_th.reshape(outshp)
F_ph = F_ph.reshape(outshp)
return F_th, F_ph
def getAngRes(self):
"""Get angular resolution of mesh grid."""
resol_th = self.thetaMsh[1, 0]-self.thetaMsh[0, 0]
resol_ph = self.phiMsh[0, 1]-self.phiMsh[0, 0]
return resol_th, resol_ph
def sphinterp_my(self, theta, phi):
# Currently this uses nearest value. No interpolation!
resol_th, resol_ph = self.getAngRes()
ind0 = numpy.argwhere(numpy.isclose(self.thetaMsh[:, 0]-theta,
numpy.zeros(self.thetaMsh.shape[0]),
rtol=0.0, atol=resol_th))[0][0]
ind1 = numpy.argwhere(numpy.isclose(self.phiMsh[0, :]-phi,
numpy.zeros(self.phiMsh.shape[1]),
rtol=0.0, atol=resol_ph))[0][0]
F_th = self.Fthetas[ind0, ind1]
F_ph = self.Fphis[ind0, ind1]
return F_th, F_ph
def rotate90z(self, sense=+1):
self.phiMsh = self.phiMsh+sense*math.pi/2
self.canonicalizeGrid()
def canonicalizeGrid(self):
"""Put the grid into a canonical order so that azimuth goes
from 0:2*pi."""
# For now only azimuths.
# First put all azimuthals on 0:2*pi branch:
branchNum = numpy.floor(self.phiMsh/(2*math.pi))
self.phiMsh = self.phiMsh-branchNum*2*math.pi
# Assume that only columns (axis=1) have to be sorted.
i = numpy.argsort(self.phiMsh[0, :])
self.phiMsh = self.phiMsh[:, i]
# thetas shouldn't need sorting on columns, but F field does:
self.Fthetas = self.Fthetas[..., i]
self.Fphis = self.Fphis[..., i]
def periodifyRectSphGrd(thetaMsh, phiMsh, F1, F2):
"""Create a 'periodic' function in azimuth."""
# theta is assumed to be on [0,pi] but phi on [0,2*pi[.
thetaAx0 = thetaMsh[:, 0].squeeze()
phiAx0 = phiMsh[0, :].squeeze()
phiAx = phiAx0.copy()
phiAx = numpy.append(phiAx, phiAx0[0]+2*math.pi)
phiAx = numpy.insert(phiAx, 0, phiAx0[-1]-2*math.pi)
F1ext = numpy.concatenate((F1[..., -1:], F1, F1[..., 0:1]), axis=-1)
F2ext = numpy.concatenate((F2[..., -1:], F2, F2[..., 0:1]), axis=-1)
return (thetaAx0, phiAx), F1ext, F2ext
def putOnPrincBranch(theta, phi):
branchNum = numpy.floor(phi/(2*math.pi))
phi_pb = phi-branchNum*2*math.pi
theta = numpy.abs(theta)
branchNum = numpy.round(theta/(2*math.pi))
theta_pb = numpy.abs(theta-branchNum*2*math.pi)
return (theta_pb, phi_pb)
def transfVecField2RotBasis(basisto, thetas_phis_build, F_th_ph):
"""This is essentially a parallactic rotation of the transverse field."""
thetas_build, phis_build = thetas_phis_build
F_th, F_ph = F_th_ph
xyz = numpy.asarray(sph2crtISO(thetas_build, phis_build))
xyzto = numpy.matmul(basisto, xyz)
sphcrtMat = getSph2CartTransfMatT(xyz, ISO=True)
sphcrtMatto = getSph2CartTransfMatT(xyzto, ISO=True)
sphcrtMatfrom_to = numpy.matmul(numpy.transpose(basisto), sphcrtMatto)
parRot = numpy.matmul(numpy.swapaxes(sphcrtMat[:, :, 1:], 1, 2),
sphcrtMatfrom_to[:, :, 1:])
F_thph = numpy.rollaxis(numpy.array([F_th, F_ph]), 0, F_th.ndim+1
)[..., numpy.newaxis]
F_thph_to = numpy.rollaxis(numpy.matmul(parRot, F_thph).squeeze(), -1, 0)
return F_thph_to
def getSph2CartTransfMat(rvm, ISO=False):
"""Compute the transformation matrix from a spherical basis to a Cartesian
basis at the field point given by the input 'r'. If input 'r' is an array
with dim>1 then the last dimension holds the r vector components.
The output 'transf_sph2cart' is defined such that:
[[v_x], [v_y], [v_z]]=transf_sph2cart*matrix([[v_r], [v_phi], [v_theta]]).
for non-ISO case.
Returns transf_sph2cart[si,ci,bi] where si,ci,bi are the sample index,
component index, and basis index resp.
The indices bi=0,1,2 map to r,phi,theta for non-ISO otherwise they map to
r,theta,phi resp., while ci=0,1,2 map to xhat, yhat, zhat resp."""
nrOfrv = rvm.shape[0]
rabs = numpy.sqrt(rvm[:, 0]**2+rvm[:, 1]**2+rvm[:, 2]**2)
rvmnrm = rvm/rabs[:, numpy.newaxis]
xu = rvmnrm[:, 0]
yu = rvmnrm[:, 1]
zu = rvmnrm[:, 2]
rb = numpy.array([xu, yu, zu])
angnrm = 1.0/numpy.sqrt(xu*xu+yu*yu)
phib = angnrm*numpy.array([yu, -xu, numpy.zeros(nrOfrv)])
thetab = angnrm*numpy.array([xu*zu, yu*zu, -(xu*xu+yu*yu)])
if ISO:
transf_sph2cart = numpy.array([rb, thetab, phib])
else:
transf_sph2cart = numpy.array([rb, phib, thetab])
# Transpose the result to get output as stack of transform matrices:
transf_sph2cart = numpy.transpose(transf_sph2cart, (2, 1, 0))
return transf_sph2cart
def getSph2CartTransfMatT(rvm, ISO=False):
"""Analogous to previous but with input transposed. """
shOfrv = rvm.shape[1:]
dmOfrv = rvm.ndim-1
rabs = numpy.sqrt(rvm[0]**2+rvm[1]**2+rvm[2]**2)
rvmnrm = rvm/rabs
xu = rvmnrm[0]
yu = rvmnrm[1]
zu = rvmnrm[2]
rb = numpy.array([xu, yu, zu])
nps = rb[2, ...] == 1.0
rho = numpy.sqrt(xu*xu+yu*yu)
npole = numpy.where(rho == 0.)
rho[npole] = numpy.finfo(float).tiny
angnrm = 1.0/rho
phib = angnrm*numpy.array([yu, -xu, numpy.zeros(shOfrv)])
thetab = angnrm*numpy.array([xu*zu, yu*zu, -(xu*xu+yu*yu)])
if len(npole[0]) > 0:
phib[:, nps] = numpy.array([0, 1, 0])[:, None]
thetab[:, nps] = numpy.array([1, 0, 0])[:, None]
# CHECK signs of basis!
if ISO:
transf_sph2cart = numpy.array([rb, thetab, phib])
else:
transf_sph2cart = numpy.array([rb, -phib, thetab])
# Transpose the result to get output as stack of transform matrices:
transf_sph2cart = numpy.rollaxis(transf_sph2cart, 0, dmOfrv+2)
transf_sph2cart = numpy.rollaxis(transf_sph2cart, 0, dmOfrv+2-1)
return transf_sph2cart
def plotAntPat2D(angle_rad, F_th, F_ph, freq=0.5):
fig = plt.figure()
ax1 = fig.add_subplot(211)
angle = numpy.rad2deg(angle_rad)
ax1.plot(angle, numpy.abs(F_th), label="F_th")
ax1.plot(angle, numpy.abs(F_ph), label="F_ph")
ax2 = fig.add_subplot(212)
ax2.plot(angle, numpy.rad2deg(F_th))
ax2.plot(angle, numpy.rad2deg(F_ph))
plt.show()
def plotFEKO(filename, request=None, freq_req=None):
"""Convenience function that reads in FEKO FFE files - using load_ffe() -
and plots it - using plotvfonsph()."""
tvf = TVecFields()
tvf.load_ffe(filename, request)
freqs = tvf.getRs()
# frqIdx = numpy.where(numpy.isclose(freqs,freq,atol=190e3))[0][0]
if freq_req is None:
print("")
print("No user specified frequency (will choose first in list)")
print("List of frequencies (in Hz):")
print(", ".join([str(f) for f in freqs]))
print("")
frqIdx = 0
else:
        # round the interpolated position to the nearest integer index
        frqIdx = int(round(numpy.interp(freq_req, freqs, range(len(freqs)))))
freq = freqs[frqIdx]
print("Frequency={}".format(freq))
(THETA, PHI, E_th, E_ph) = (tvf.getthetas(), tvf.getphis(),
tvf.getFthetas(freq), tvf.getFphis(freq))
plotvfonsph(THETA, PHI, E_th, E_ph, freq, vcoord='Ludwig3',
projection='orthographic')
# TobiaC (2013-06-17)
def projectdomain(theta_rad, phi_rad, F_th, F_ph, projection):
"""Convert spherical coordinates into various projections."""
projections = ['orthographic', 'azimuthal-equidistant', 'equirectangular']
if projection == 'orthographic':
# Fix check for theta>pi/2
# Plot hemisphere theta<pi/2
UHmask = theta_rad > math.pi/2
F_th = numpy.ma.array(F_th, mask=UHmask)
F_ph = numpy.ma.array(F_ph, mask=UHmask)
x = numpy.sin(theta_rad)*numpy.cos(phi_rad)
y = numpy.sin(theta_rad)*numpy.sin(phi_rad)
xyNames = ('l', 'm')
nom_xticks = None
elif projection == 'azimuthal-equidistant':
# 2D polar to cartesian conversion
# (put in offset)
x = theta_rad*numpy.cos(phi_rad)
y = theta_rad*numpy.sin(phi_rad)
xyNames = ('theta*cos(phi)', 'theta*sin(phi)')
nom_xticks = None
elif projection == 'equirectangular':
y = theta_rad
x = phi_rad
xyNames = ('phi', 'theta')
nom_xticks = None # [0,45,90,135,180,225,270,315,360]
else:
print("Supported projections are: {}".format(', '.join(projections)))
raise ValueError("Unknown map projection: {}".format(projection))
return x, y, xyNames, nom_xticks, F_th, F_ph
def lin2circ(vx, vy, isign=1):
"""Convert 2-vector from linear basis to circular basis. Output order L, R.
isign argument chooses sign of imaginary unit in phase convention.
(See Hamaker1996_III)"""
vl = (vx-isign*1j*vy)/math.sqrt(2)
vr = (vx+isign*1j*vy)/math.sqrt(2)
return vl, vr
def circ2lin(vl, vr, isign=1):
"""Convert 2-vector from circular basis to linear basis. Input order L, R.
isign argument chooses sign of imaginary unit in phase convention.
(See Hamaker1996_III)"""
vx = (vl+vr)/math.sqrt(2)
vy = isign*1j*(vl-vr)/math.sqrt(2)
return vx, vy
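# Illustrative sketch (added for clarity, not part of the original module):
# lin2circ and circ2lin are mutual inverses for the same isign, so a
# linear -> circular -> linear round trip recovers the original components.
def _example_lin_circ_roundtrip():
    vx, vy = 1.0 + 0.5j, 0.25 - 1.0j
    vl, vr = lin2circ(vx, vy)
    vx2, vy2 = circ2lin(vl, vr)
    return numpy.isclose(vx, vx2) and numpy.isclose(vy, vy2)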
def vcoordconvert(F1, F2, phi_rad, vcoordlist):
"""Convert transverse vector components of field."""
# vcoords = ['Ludwig3', 'sph', 'circ', 'lin']
compname = ['F_', 'F_']
for vcoord in vcoordlist:
if vcoord == 'Ludwig3':
F1p, F2p = sph2Ludwig3(phi_rad, F1, F2)
compsuffix = ['u', 'v']
elif vcoord == 'sph':
F1p, F2p = F1, F2
compsuffix = ['theta', 'phi']
elif vcoord == 'circ':
F1p, F2p = lin2circ(F1, F2)
compsuffix = ['L', 'R']
elif vcoord == 'lin':
F1p, F2p = circ2lin(F1, F2)
compsuffix = ['X', 'Y']
else:
raise ValueError("Unknown vector coord sys")
compname = [compname[0]+compsuffix[0], compname[1]+compsuffix[1]]
F1, F2 = F1p, F2p
return F1, F2, compname
def cmplx2realrep(F_c, cmplx_rep):
"""Complex to real representation"""
if cmplx_rep == 'ReIm':
cmpopname_r0, cmpopname_r1 = 'Re', 'Im'
F_r0, F_r1 = numpy.real(F_c), numpy.imag(F_c)
elif cmplx_rep == 'AbsAng':
cmpopname_r0, cmpopname_r1 = 'Abs', 'Ang'
F_r0, F_r1 = numpy.absolute(F_c), numpy.rad2deg(numpy.angle(F_c))
else:
raise ValueError("Complex representation not known")
return (F_r0, F_r1), (cmpopname_r0, cmpopname_r1)
# FIXME: This function should be recast as refering to radial comp instead of
# freq.
def plotvfonsph(theta_rad, phi_rad, F_th, F_ph, freq=0.0,
vcoordlist=['sph'], projection='orthographic',
cmplx_rep='AbsAng', vfname='Unknown'):
"""Plot transverse vector field on sphere. Different projections are
supported as are different bases and complex value representations."""
x, y, xyNames, nom_xticks, F_th, F_ph = projectdomain(theta_rad, phi_rad,
F_th, F_ph,
projection)
F0_c, F1_c, compNames = vcoordconvert(F_th, F_ph, phi_rad,
vcoordlist=vcoordlist)
F0_2r, cmplxop0 = cmplx2realrep(F0_c, cmplx_rep)
F1_2r, cmplxop1 = cmplx2realrep(F1_c, cmplx_rep)
if projection == 'orthographic':
xyNames = [xyNames[0]+' []', xyNames[1]+' []']
if projection == 'azimuthal-equidistant':
x = numpy.rad2deg(x)
y = numpy.rad2deg(y)
xyNames = [xyNames[0]+' [deg.]', xyNames[1]+' [deg.]']
fig = plt.figure()
fig.suptitle(vfname+' @ '+str(freq/1e6)+' MHz'+', '
+ 'projection: '+projection)
def plotcomp(vcmpi, cpi, zcomp, cmplxop, xyNames, nom_xticks):
if cmplxop[cpi] == 'Ang':
cmap = plt.get_cmap('hsv')
else:
cmap = plt.get_cmap('viridis')
plt.pcolormesh(x, y, zcomp[cpi], cmap=cmap)
if nom_xticks is not None:
plt.xticks(nom_xticks)
# FIX next line
ax.set_title(cmplxop[cpi]+'('+compNames[vcmpi]+')')
plt.xlabel(xyNames[0])
plt.ylabel(xyNames[1])
plt.grid()
plt.colorbar()
if projection == 'equirectangular':
ax.invert_yaxis()
ax = plt.subplot(221, polar=False)
plotcomp(0, 0, F0_2r, cmplxop0, xyNames, nom_xticks)
ax = plt.subplot(222, polar=False)
plotcomp(0, 1, F0_2r, cmplxop0, xyNames, nom_xticks)
ax = plt.subplot(223, polar=False)
plotcomp(1, 0, F1_2r, cmplxop1, xyNames, nom_xticks)
ax = plt.subplot(224, polar=False)
plotcomp(1, 1, F1_2r, cmplxop1, xyNames, nom_xticks)
plt.show()
def plotvfonsph3D(theta_rad, phi_rad, E_th, E_ph, freq=0.0,
vcoord='sph', projection='equirectangular'):
PLOT3DTYPE = "quiver"
(x, y, z) = sph2crtISO(theta_rad, phi_rad)
from mayavi import mlab
mlab.figure(1, bgcolor=(1, 1, 1), fgcolor=(0, 0, 0), size=(400, 300))
mlab.clf()
if PLOT3DTYPE == "MESH_RADIAL":
r_Et = numpy.abs(E_th)
r_Etmx = numpy.amax(r_Et)
mlab.mesh(r_Et*(x)-1*r_Etmx, r_Et*y, r_Et*z, scalars=r_Et)
r_Ep = numpy.abs(E_ph)
r_Epmx = numpy.amax(r_Ep)
mlab.mesh(r_Ep*(x)+1*r_Epmx, r_Ep*y, r_Ep*z, scalars=r_Ep)
elif PLOT3DTYPE == "quiver":
# Implement quiver plot
s2cmat = getSph2CartTransfMatT(numpy.array([x, y, z]))
E_r = numpy.zeros(E_th.shape)
E_fldsph = numpy.rollaxis(numpy.array([E_r, E_ph, E_th]), 0, 3
)[..., numpy.newaxis]
E_fldcrt = numpy.rollaxis(numpy.matmul(s2cmat, E_fldsph).squeeze(),
2, 0)
mlab.quiver3d(x+1.5, y, z,
numpy.real(E_fldcrt[0]),
numpy.real(E_fldcrt[1]),
numpy.real(E_fldcrt[2]))
mlab.quiver3d(x-1.5, y, z,
numpy.imag(E_fldcrt[0]),
numpy.imag(E_fldcrt[1]),
numpy.imag(E_fldcrt[2]))
mlab.show()
def sph2Ludwig3(azl, EsTh, EsPh):
"""Input: an array of theta components and an array of phi components.
Output: an array of Ludwig u components and array Ludwig v.
Ref Ludwig1973a."""
EsU = EsTh*numpy.sin(azl)+EsPh*numpy.cos(azl)
EsV = EsTh*numpy.cos(azl)-EsPh*numpy.sin(azl)
return EsU, EsV
def Ludwig32sph(azl, EsU, EsV):
EsTh = EsU*numpy.sin(azl)+EsV*numpy.cos(azl)
EsPh = EsU*numpy.cos(azl)-EsV*numpy.sin(azl)
return EsTh, EsPh
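# Illustrative sketch (added for clarity, not part of the original module):
# the Ludwig3 transform is its own inverse, so converting spherical components
# to Ludwig3 and back should recover the originals up to floating point error.
def _example_ludwig3_roundtrip():
    azl = numpy.linspace(0.0, 2 * math.pi, 8)
    EsTh = numpy.cos(azl) + 1j * numpy.sin(azl)
    EsPh = numpy.ones_like(azl, dtype=complex)
    EsU, EsV = sph2Ludwig3(azl, EsTh, EsPh)
    EsTh2, EsPh2 = Ludwig32sph(azl, EsU, EsV)
    return numpy.allclose(EsTh, EsTh2) and numpy.allclose(EsPh, EsPh2)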
|
2baOrNot2ba/AntPat
|
antpat/reps/sphgridfun/tvecfun.py
|
Python
|
isc
| 21,293
|
[
"Mayavi"
] |
86cfc63df249fd9c15fc2fc59239879cce7cd6f05662707efcae186f302e1fc4
|
#!/usr/bin/python
#
# Open SoundControl for Python
# Copyright (C) 2002 Daniel Holth, Clinton McChesney
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# For questions regarding this module contact
# Daniel Holth <dholth@stetson.edu> or visit
# http://www.stetson.edu/~ProctoLogic/
#
# Changelog:
# 15 Nov. 2001:
# Removed dependency on Python 2.0 features.
# - dwh
# 13 Feb. 2002:
# Added a generic callback handler.
# - dwh
import sys
import struct
import math
import string
from OSCUtils import *
class OSCMessage:
"""Builds typetagged OSC messages."""
def __init__(self):
self.address = ""
self.typetags = ","
self.message = ""
def setAddress(self, address):
self.address = address
def setMessage(self, message):
self.message = message
def setTypetags(self, typetags):
self.typetags = typetags
def clear(self):
self.address = ""
self.clearData()
def clearData(self):
self.typetags = ","
self.message = ""
def append(self, argument, typehint = None):
"""Appends data to the message,
updating the typetags based on
the argument's type.
If the argument is a blob (counted string)
pass in 'b' as typehint."""
if typehint == 'b':
binary = OSCBlob(argument)
else:
binary = OSCArgument(argument)
self.typetags = self.typetags + binary[0]
self.rawAppend(binary[1])
def rawAppend(self, data):
"""Appends raw data to the message. Use append()."""
self.message = self.message + data
def getBinary(self):
"""Returns the binary message (so far) with typetags."""
address = OSCArgument(self.address)[1]
typetags = OSCArgument(self.typetags)[1]
return address + typetags + self.message
def __repr__(self):
return self.getBinary()
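# Illustrative sketch (added for clarity, not part of the original module;
# assumes OSCUtils' OSCArgument handles ints, floats and strings as the
# append() docstring implies): build a typetagged message and get its bytes.
def _example_oscmessage():
    msg = OSCMessage()
    msg.setAddress("/test/path")
    msg.append(440)      # int argument, typetag added automatically
    msg.append(0.5)      # float argument
    msg.append("hello")  # string argument
    return msg.getBinary()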
|
shouldmakemusic/yaas
|
LiveOSC/OSCMessage.py
|
Python
|
gpl-2.0
| 2,574
|
[
"VisIt"
] |
77bcd241ae2e9f30e4d2d946b94873a16c7f94b8d2fe269a733cd2eb96a31deb
|
"""
Module containing analysis functions for raster datasets.
"""
import itertools, operator
from .data import *
from . import manager
from .. import vector
import PIL.Image, PIL.ImageMath, PIL.ImageStat, PIL.ImageMorph
import math
# Zonal aggregation
def zonal_statistics(zonaldata, valuedata, zonalband=0, valueband=0, outstat="mean", nodataval=-999):
"""
Summarizes values of a raster dataset in groups or regions defined by a zonal dataset, which
can be either vector data or a categorical raster.
For each unique zone in "zonaldata" (each feature in the case of vector data), summarizes "valuedata"
cells that overlaps that zone.
Which band to use must be specified for each with "zonalband" and "valueband".
The "outstat" statistics option can be one of: mean (default), median, max, min, stdev, var, count, or sum
NOTE: For now, both must have same crs, no auto conversion done under the hood.
"""
# handle zonaldata being vector type
if not isinstance(zonaldata, RasterData):
zonaldata = manager.rasterize(zonaldata, **valuedata.rasterdef)
zonaldata = zonaldata.conditional("val > 0") # necessary bc rasterize returns 8bit instead of binary
# resample value grid into zonal grid
if zonaldata.affine != valuedata.affine:
valuedata = manager.resample(valuedata, **zonaldata.rasterdef)
# pick one band for each
zonalband = zonaldata.bands[zonalband]
valueband = valuedata.bands[valueband]
# create output image, using nullzone as nullvalue
georef = dict(width=valuedata.width, height=valuedata.height,
affine=valuedata.affine)
outrast = RasterData(mode="float32", **georef)
outrast.add_band(nodataval=nodataval)
# get stats for each unique value in zonal data
zonevalues = (val for count,val in zonalband.img.getcolors(zonaldata.width*zonaldata.height))
zonesdict = {}
#print zonalband, zonalband.summarystats()
#zonalband.view()
#valueband.view()
for zoneval in zonevalues:
# exclude nullzone
if zoneval == zonalband.nodataval: continue
#print "zone",zoneval
# mask valueband to only the current zone
curzone = valueband.copy()
#print "copy"
#print curzone.summarystats()
#curzone.view() #.img.show()
curzone.mask = zonalband.conditional("val != %s" % zoneval).img # returns true everywhere, which is not correct..., maybe due to nodataval???
#print "cond",zoneval
#print zonalband.conditional("val != %s" % zoneval).summarystats()
#zonalband.conditional("val != %s" % zoneval).view() #img.show()
#print "mask"
#print curzone.summarystats()
#curzone.view() #img.show()
# also exclude null values from calculations
curzone.mask = valueband.mask # pastes additional nullvalues
curzone._cached_mask = None # force having to recreate the mask using the combined old and pasted nullvals
#print "mask2", curzone
#print curzone.summarystats()
#curzone.view() #img.show()
# retrieve stats
stats = curzone.summarystats(outstat)
zonesdict[zoneval] = stats
# write chosen stat to outimg
if stats[outstat] is None:
stats[outstat] = nodataval
outrast.bands[0].img.paste(stats[outstat], mask=curzone.mask)
return zonesdict, outrast
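# Illustrative sketch (added for clarity; 'zones' and 'values' are assumed to
# be already-loaded RasterData objects sharing the same crs): summarize the
# value raster per zone and keep the per-zone means.
def _example_zonal_statistics(zones, values):
    zonesdict, outrast = zonal_statistics(zones, values, outstat="mean")
    return zonesdict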
# Raster math
def mathexpr(mathexpr, rasters):
"""Performs math operations on one or more raster datasets.
The math is given in "mathexpr" as a string expression, where each input raster is
referred to as "rast1", "rast2", etc, according to their order in the input raster list.
Supports all of Python's math expressions. Logical operations like == or > are also supported
and will return binary rasters.
TODO: For now just uses band 0 for each raster, should add support for specifying bands.
TODO: Check that all math works correctly, such as divide and floats vs ints.
Alias: Raster algebra.
"""
#print rasters
# align all to same affine
rasters = (rast for rast in rasters)
reference = next(rasters)
def _aligned():
yield reference
for rast in rasters:
if rast.affine != reference.affine:
rast = manager.resample(rast, width=reference.width, height=reference.height, affine=reference.affine)
yield rast
# convert all nullvalues to zero before doing any math
def _nulled():
for rast in _aligned():
for band in rast:
# TODO: recode here somehow blanks out everything...
#band.recode("val == %s"%band.nodataval, 0.0)
pass
yield rast
# calculate math
# basic math + - * / ** %
# note: logical ops ~ & | ^ makes binary mask and return the pixel value where mask is valid
# note: relational ops < > == != return only binary mask
# note: other useful is min() and max(), equiv to (r1 < r2) | r2
rastersdict = dict([("rast%i"%(i+1),rast.bands[0].img)#.convert("F"))
for i,rast in enumerate(_nulled())])
img = PIL.ImageMath.eval(mathexpr, **rastersdict)
# should maybe create a combined mask of nullvalues for all rasters
# and filter away those nullcells from math result
# ...
# return result
outraster = RasterData(image=img, **reference.meta)
return outraster
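# Illustrative sketch (added for clarity; 'rast1' and 'rast2' are assumed to
# be already-loaded, spatially aligned RasterData objects): raster algebra on
# two rasters, e.g. a normalized difference.
def _example_mathexpr(rast1, rast2):
    return mathexpr("(rast1 - rast2) / (rast1 + rast2)", [rast1, rast2])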
# Interpolation
def interpolate(pointdata, rasterdef, valuefield=None, algorithm="idw", **kwargs):
"""Exact interpolation between point data values. Original values are kept intact.
The raster extent and cell size on which to interpolate must be defined in "rasterdef".
First all points are burnt onto the output raster. By default, each point counts as a value of 1,
but "valuefield" can also be set to a field name that determies the relative weight of each
point feature.
When multiple points land in the same output cell, the point values are aggregated according
to "aggval" (defaults to mean) to determine the cell's final value.
When the points are converted to cell values, the remaining cells without any point features are
interpolated.
NOTE: The algorithm for interpolating is set with "algorithm", but currently only allows "idw" or
inverse distance weighting.
TODO: Add spline, kdtree, and kriging methods.
"""
# some links
#http://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.interpolate.RegularGridInterpolator.html
#https://github.com/JohannesBuchner/regulargrid
#http://stackoverflow.com/questions/24978052/interpolation-over-regular-grid-in-python
#http://www.qgistutorials.com/en/docs/creating_heatmaps.html
#see especially: http://resources.arcgis.com/en/help/main/10.1/index.html#//009z0000000v000000
# TODO: require aggfunc with exception...
if not pointdata.type == "Point":
raise Exception("Pointdata must be of type point")
if rasterdef["mode"] == "1bit":
raise Exception("Cannot do interpolation to a 1bit raster")
algorithm = algorithm.lower()
if algorithm == "idw":
# create output raster
raster = RasterData(**rasterdef)
newband = raster.add_band() # add empty band
# default options
neighbours = kwargs.get("neighbours")
sensitivity = kwargs.get("sensitivity")
aggfunc = kwargs.get("aggfunc", "mean")
# collect counts or sum field values
from ..vector import sql
def key(feat):
x,y = feat.geometry["coordinates"]
px,py = raster.geo_to_cell(x,y)
return px,py
def valfunc(feat):
val = feat[valuefield] if valuefield else 1
return val
fieldmapping = [("aggval",valfunc,aggfunc)]
points = dict()
for (px,py),feats in itertools.groupby(pointdata, key=key):
aggval = sql.aggreg(feats, fieldmapping)[0]
if isinstance(aggval,(int,float)): # only consider numeric values, ignore missing etc
points[(px,py)] = aggval
# retrieve input options
if neighbours == None:
# TODO: not yet implemented
neighbours = int(len(points)*0.10) #default neighbours is 10 percent of known points
if sensitivity == None:
            sensitivity = 3  # same as the IDW power: higher sensitivity makes nearby points dominate and far-away points matter less
# some precalcs
senspow = (-sensitivity/2.0)
# some defs
def _calcvalue(gridx, gridy, points):
weighted_values_sum = 0.0
sum_of_weights = 0.0
for (px,py),pval in points.items():
weight = ((gridx-px)**2 + (gridy-py)**2)**senspow
sum_of_weights += weight
weighted_values_sum += weight * pval
return weighted_values_sum / sum_of_weights
# calculate values
for gridy in range(raster.height):
for gridx in range(raster.width):
newval = points.get((gridx,gridy))
if newval != None:
# gridxy to calculate is exact same as one of the point xy, so just use same value
pass
else:
# main calc
newval = _calcvalue(gridx, gridy, points)
newband.set(gridx,gridy,newval)
elif algorithm == "spline":
# see C scripts at http://davis.wpi.edu/~matt/courses/morph/2d.htm
# looks simple enough
# ...
raise Exception("Not yet implemented")
elif algorithm == "kdtree":
# https://github.com/stefankoegl/kdtree
# http://rosettacode.org/wiki/K-d_tree
raise Exception("Not yet implemented")
elif algorithm == "kriging":
# ...?
raise Exception("Not yet implemented")
else:
raise Exception("Not a valid interpolation algorithm")
return raster
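# Illustrative sketch (added for clarity; 'pointdata' is assumed to be a loaded
# point vector layer with a numeric "value" field, and 'affine' a valid affine
# transform for the output grid): inverse distance weighted interpolation onto
# a 100x100 float raster.
def _example_interpolate(pointdata, affine):
    rasterdef = dict(mode="float32", width=100, height=100, affine=affine)
    return interpolate(pointdata, rasterdef, valuefield="value",
                       algorithm="idw", sensitivity=3)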
def smooth(pointdata, rasterdef, valuefield=None, algorithm="radial", **kwargs):
"""
Bins and aggregates point data values, followed by simple value smearing to produce a smooth surface raster.
Different from interpolation in that the new values do not exactly pass through the original values.
The raster extent and cell size on which to smooth must be defined in "rasterdef".
Smoothing works by considering a region around each pixel, specified by "algorithm".
Supported binning regions include:
- "radial" (default): a circle of size "radius";
- "gauss": a Gaussian statistical function applied to the distance-weighted average of pixels
within "radius" distance of the output pixel.
The points considered to be part of that region
are then summarized with a statistic as determined by "aggfunc" (defaults to sum) and used as the pixel
value. For the Gaussian method, this is the function used to aggregate points to pixels before
blurring.
By default, each point counts as a value of 1,
but "valuefield" can also be set to a field name that determies the relative weight of each
point feature.
TODO: Add more methods such as box convolving.
Alias: convolve, blur, heatmap (but incorrect usage).
"""
# TODO: this assumes points, but isnt smoothing generally understood to apply to existing rasters?
# ...or are these the same maybe?
if not pointdata.type == "Point":
raise Exception("Pointdata must be of type point")
if rasterdef["mode"] == "1bit":
raise Exception("Cannot do interpolation to a 1bit raster")
algorithm = algorithm.lower()
if algorithm == "radial":
# create output raster
raster = RasterData(**rasterdef)
raster.add_band() # add empty band
band = raster.bands[0]
# calculate for each cell
if not hasattr(pointdata, "spindex"):
pointdata.create_spatial_index()
raster.convert("float32") # output will be floats
if not "radius" in kwargs:
raise Exception("Radius must be set for 'radial' method")
rad = float(kwargs["radius"])
c = None
for cell in band:
#if c != cell.row:
# print cell.row
# c = cell.row
px,py = cell.col,cell.row
x,y = raster.cell_to_geo(px,py)
def weights():
for feat in pointdata.quick_overlap([x-rad,y-rad,
x+rad,y+rad]):
fx,fy = feat.geometry["coordinates"] # assumes single point
dist = math.sqrt((fx-x)**2 + (fy-y)**2)
if dist <= rad:
weight = feat[valuefield] if valuefield else 1
yield weight * (1 - (dist / rad))
from ..vector import sql
valfunc = lambda v: v
aggfunc = kwargs.get("aggfunc", "sum")
fieldmapping = [("aggval",valfunc,aggfunc)]
aggval = sql.aggreg(weights(), fieldmapping)[0]
if aggval or aggval == 0:
cell.value = aggval
elif algorithm == "gauss":
# create output raster
raster = RasterData(**rasterdef)
raster.add_band() # add empty band
newband = raster.bands[0]
# collect counts or sum field values
from ..vector import sql
def key(feat):
x,y = feat.geometry["coordinates"]
px,py = raster.geo_to_cell(x,y)
return px,py
def valfunc(feat):
val = feat[valuefield] if valuefield else 1
return val
aggfunc = kwargs.get("aggfunc", "sum")
fieldmapping = [("aggval",valfunc,aggfunc)]
for (px,py),feats in itertools.groupby(pointdata, key=key):
aggval = sql.aggreg(feats, fieldmapping)[0]
newband.set(px,py, aggval)
# apply gaussian filter
if raster.mode.endswith("8"):
# PIL gauss filter only works on L mode images
import PIL, PIL.ImageOps, PIL.ImageFilter
rad = kwargs.get("radius", 3)
filt = PIL.ImageFilter.GaussianBlur(radius=rad)
newband.img = newband.img.filter(filt)
else:
# Gauss calculation in pure Python
# algorithm 1 from http://blog.ivank.net/fastest-gaussian-blur.html
# TODO: implement much faster algorithm 4
# TODO: output seems to consider a square around each feat, shouldn't it be a circle?
# TODO: output values are very low decimals, is that correct? maybe it's just a
# ...probability weight that has to be applied to orig value?
# check out: https://homepages.inf.ed.ac.uk/rbf/HIPR2/gsmooth.htm
origband = newband.copy()
raster.convert("float32") # output values will be floats
rad = kwargs.get("radius", 3)
rs = int(rad*2.57+1) # significant radius
# some precalcs
rr2 = 2*rad*rad
prr2 = float(math.pi*2*rad*rad)
exp = math.exp
for i in range(raster.height):
#print i
for j in range(raster.width):
val = 0.0
wsum = 0.0
for iy in range(i-rs, i+rs+1):
for ix in range(j-rs, j+rs+1):
x = min([raster.width-1, max([0,ix])])
y = min([raster.height-1, max([0,iy])])
dsq = (ix-j)*(ix-j)+(iy-i)*(iy-i)
weight = exp(-dsq/rr2) / prr2
val += origband.get(x,y).value * weight
wsum += weight
newval = val/wsum
#print j,i,newval
newband.set(j,i, newval)
elif algorithm == "box":
# http://stackoverflow.com/questions/6652671/efficient-method-of-calculating-density-of-irregularly-spaced-points
# ...
pass
else:
raise Exception("Not a valid smoothing algorithm")
return raster
def density(pointdata, rasterdef, algorithm="radial", **kwargs):
"""Creates a raster of the density of points, ie the frequency of their occurance
without thinking about the values of each point. Same as using the smooth function
without setting the valuefield."""
# only difference being no value field contributes to heat
# TODO: allow density of linear and polygon features too,
# maybe by counting nearby features
return smooth(pointdata, rasterdef, valuefield=None, algorithm=algorithm, **kwargs)
def disperse(vectordata, valuekey, weight=None, **rasterdef):
"""Disperses values in a vector dataset based on a raster dataset containing weights.
If the raster weight is not given, then a raster geotransform must be given and the
value is divided into equal portions for all the cells.
After each feature disperses its values into cells, the sum of those cells should always equal
the original feature value. However, in the case of features that overlap each other, cell values will be
added on top of each other, and there will be no way of reconstructing how much of a cell's value
belonged to one feature or the other.
Returns a raster dataset of the dispersed values.
"""
if weight:
outrast = RasterData(mode="float32", **weight.rasterdef)
else:
outrast = RasterData(mode="float32", **rasterdef)
outband = outrast.add_band()
outband.nodataval = None
for feat in vectordata:
if not feat.geometry:
continue
featdata = vector.data.VectorData(features=[feat])
if weight:
featweight = manager.clip(weight, featdata)
else:
featweight = manager.rasterize(featdata, **outrast.rasterdef)
# TODO: Does clip and rasterize write nodataval to nonvalid areas? Is this correct?
# Unless nodataval is reset, those then prevent correct math operations somehow...
featweight.bands[0].nodataval = None
weightsum = featweight.bands[0].summarystats("sum")["sum"]
if weightsum is None:
continue
weightprop = featweight.bands[0] / float(weightsum) / 255.0 # / 255 is a hack, have to decide if binary rasters should be 1 or 255.
total = valuekey(feat)
weightvalue = weightprop * total
weightvalue.nodataval = None
outband = outband + weightvalue
outrast.bands[0] = outband
return outrast
# Distance Analysis
def distance(data, **rasterdef):
"""Calculates raster of distances to nearest feature in vector data.
Output raster extent and cell size must be set with keyword arguments.
Uses fast approach that rasterizes the edge of the vector data and only compares
distances to each edge pixel, significantly reducing time for complex geometries.
TODO: Distances are measured using euclidean distance, should also allow option for geodetic.
"""
# TODO: allow max dist limit
if isinstance(data, RasterData):
raise NotImplementedError("Distance tool requires vector data")
from shapely.geometry import Point, MultiPoint, LineString, asShape
outrast = RasterData(mode="float32", **rasterdef)
outband = outrast.add_band() # make sure all values are set to 0 dist at outset
fillband = manager.rasterize(data, **rasterdef).bands[0]
# ALT1: each pixel to each feat
# TODO: this approach is super slow...
## geoms = [feat.get_shapely() for feat in data]
## for cell in fillband:
## if cell.value == 0:
## # only calculate where vector is absent
## #print "calc..."
## point = Point(cell.x,cell.y) #asShape(cell.point)
## dist = point.distance(geoms[0]) #min((point.distance(g) for g in geoms))
## #print cell.col,cell.row,dist
## outband.set(cell.col, cell.row, dist)
## else:
## pass #print "already set", cell.value
# ALT2: each pixel to union
## # TODO: this approach gets stuck...
##
## import shapely
## outline = shapely.ops.cascaded_union([feat.get_shapely() for feat in data])
## for cell in fillband:
## if cell.value == 0:
## # only calculate where vector is absent
## #print "calc..."
## point = Point(cell.x,cell.y)
## dist = point.distance(outline)
## print cell.col,cell.row,dist
## outband.set(cell.col, cell.row, dist)
## else:
## pass #print "already set", cell.value
# ALT3: each pixel to each rasterized edge pixel
# Pixel to pixel inspiration from: https://trac.osgeo.org/postgis/wiki/PostGIS_Raster_SoC_Idea_2012/Distance_Analysis_Tools/document
# TODO: maybe shouldn't be outline points but an outline line, to calc dist between points too?
# TODO: current morphology approach gets crazy for really large rasters
# maybe optimize by simplifying multiple points on a straight line, and make into linestring
#outlineband = manager.rasterize(data.convert.to_lines(), **rasterdef).bands[0]
## outlinepixels = PIL.ImageMorph.MorphOp(op_name="edge").match(fillband.img)
## print "outlinepixels",len(outlinepixels)
##
## outlinepoints = MultiPoint([outrast.cell_to_geo(*px) for px in outlinepixels])
##
## for cell in fillband:
## if cell.value == 0:
## # only calculate where vector is absent
## point = Point(cell.x,cell.y)
## dist = point.distance(outlinepoints)
## outband.set(cell.col, cell.row, dist)
# ALT4: each pixel to each rasterized edge pixel, with spindex
#outlineband = manager.rasterize(data.convert.to_lines(), **rasterdef).bands[0]
outlinepixels = PIL.ImageMorph.MorphOp(op_name="edge").match(fillband.img)
print("outlinepixels",len(outlinepixels))
import rtree
spindex = rtree.index.Index()
outlinepoints = [outrast.cell_to_geo(*px) for px in outlinepixels]
for i,p in enumerate(outlinepoints):
bbox = list(p) + list(p)
spindex.insert(i, bbox)
for cell in fillband:
if cell.value == 0:
# only calculate where vector is absent
bbox = [cell.x, cell.y, cell.x, cell.y]
nearestid = next(spindex.nearest(bbox, num_results=1))
point = cell.x,cell.y
otherpoint = outlinepoints[nearestid]
dist = math.hypot(point[0]-otherpoint[0], point[1]-otherpoint[1])
outband.set(cell.col, cell.row, dist)
# ALT5: each pixel to reconstructed linestring of rasterized edge pixels, superfast if can reconstruct
## outlinepixels = PIL.ImageMorph.MorphOp(op_name="edge").match(fillband.img)
##
## # TODO: reconstruct linestring from outlinepixels...
## outline = LineString([outrast.cell_to_geo(*px) for px in outlinepixels])
##
## # TODO: simplify linestring...
#### print "outlinepixels",len(outlinepixels)
#### simplified = PIL.ImagePath.Path(outlinepixels)
#### simplified.compact(2) # 2 px
#### outlinepixels = simplified.tolist()
#### print "simplified",len(outlinepixels)
##
## for cell in fillband:
## if cell.value == 0:
## # only calculate where vector is absent
## point = Point(cell.x,cell.y)
## dist = point.distance(outline)
## outband.set(cell.col, cell.row, dist)
# ALT6: incremental neighbour growth check overlap
# ie
#im = fillband.img
#for _ in range(32):
# count,im = PIL.ImageMorph.MorphOp(op_name="erosion4").apply(im)
#im.show()
# ...
return outrast
# Morphology
def morphology(raster, selection, pattern, bandnum=0):
"""General purpose morphology pattern operations, returning binary raster.
First, "selection" is a conditional expression converting the raster to binary,
defining which values to interpret as on-values.
Then, an algorithm analyzes the on-values and looks for the pattern set in "pattern",
which includes "edge", "dilation", "erosion", or a manual input string as
expected by PIL.ImageMorph.
"""
premask = raster.mask
cond = raster.bands[bandnum].conditional(selection)
count,im = PIL.ImageMorph.MorphOp(op_name=pattern).apply(cond.img)
out = RasterData(image=im, **raster.rasterdef)
out.mask = premask
return out
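# Hedged usage sketch (illustrative; the exact selection-expression syntax expected by
# band.conditional() is defined elsewhere in this package, so the string below is a placeholder):
# edgemask = morphology(landcover, selection="val > 0", pattern="edge")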
# Path Analysis
def least_cost_path(point1, point2, **options):
# use https://github.com/elemel/python-astar
# maybe also: https://www.codeproject.com/articles/9040/maze-solver-shortest-path-finder
pass
# Terrain Analysis
def viewshed(point, direction, height, raster, **kwargs):
pass
def slope(raster):
pass
|
karimbahgat/PythonGis
|
pythongis/raster/analyzer.py
|
Python
|
mit
| 25,155
|
[
"Gaussian"
] |
b4324153bf174d79c32b3ee5048e959583c5fd42fa784423578083274f56d745
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2008 Jerome Rapinat
# Copyright (C) 2008 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# gen.filters.rules/Person/_HasLDS.py
# $Id$
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._hasldsbase import HasLDSBase
#-------------------------------------------------------------------------
#
# HasLDS
#
#-------------------------------------------------------------------------
class HasLDS(HasLDSBase):
"""Rule that checks for a person with a LDS event"""
name = _('People with <count> LDS events')
description = _("Matches people with a certain number of LDS events")
|
Forage/Gramps
|
gramps/gen/filters/rules/person/_haslds.py
|
Python
|
gpl-2.0
| 1,770
|
[
"Brian"
] |
24bdb44e3e65211b42b49481c702f2fc5215761696810d4f1855e31a4eff85c6
|
from openpnm.network import GenericNetwork, Cubic
from openpnm import topotools
from openpnm.utils import logging, Workspace
import numpy as np
logger = logging.getLogger(__name__)
ws = Workspace()
class Bravais(GenericNetwork):
r"""
Crystal lattice types including fcc, bcc, sc, and hcp
These arrangements not only allow more dense packing than the standard
Cubic for higher porosity materials, but also have more interesting
non-straight connections between the various pore sites.
More information on Bravais lattice notation can be `found on wikipedia
<https://en.wikipedia.org/wiki/Bravais_lattice>`_.
Parameters
----------
shape : array_like
The number of pores in each direction. This value is a bit ambiguous
for the more complex unit cells used here, but generally refers to
the number of 'corner' sites
spacing : array_like (optional)
The spacing between pores in all three directions. Like the ``shape``
this is a bit ambiguous but refers to the spacing between corner sites.
Essentially it controls the dimensions of the unit cell. If a scalar
is given it is applied to all directions. The default is 1.
mode : string
The type of lattice to create. Options are:
- 'sc' : Simple cubic (Same as ``Cubic``)
- 'bcc' : Body-centered cubic lattice
- 'fcc' : Face-centered cubic lattice
- 'hcp' : Hexagonal close packed (Not Implemented Yet)
name : string
An optional name for the object to help identify it. If not given,
one will be generated.
project : OpenPNM Project object, optional
Each OpenPNM object must be part of a Project. If none is supplied
then one will be created and this Network will be automatically
assigned to it. To create a Project use ``openpnm.Project()``.
See Also
--------
Cubic
CubicDual
Notes
-----
The pores are labelled as belonging to 'corner_sites' and 'body_sites' in
bcc or 'face_sites' in fcc. Throats are labelled by which type of
pores they connect, e.g. 'throat.corner_to_body'.
Limitations:
* Bravais lattices can also have a skew to them, but this is not implemented
yet.
* Support for 2D networks has not been added yet.
* Hexagonal Close Packed (hcp) has not been implemented yet, but is on the
todo list.
Examples
--------
>>> import openpnm as op
>>> sc = op.network.Bravais(shape=[3, 3, 3], mode='sc')
>>> bcc = op.network.Bravais(shape=[3, 3, 3], mode='bcc')
>>> fcc = op.network.Bravais(shape=[3, 3, 3], mode='fcc')
>>> sc.Np, bcc.Np, fcc.Np
(27, 35, 63)
Since these three networks all have the same domain size, it is clear that
both 'bcc' and 'fcc' have more pores per unit volume. This is particularly
helpful for modeling higher porosity materials.
They all have the same number of corner sites, which corresponds to the
[3, 3, 3] shape that was specified:
>>> sc.num_pores('corner*'), bcc.num_pores('cor*'), fcc.num_pores('cor*')
(27, 27, 27)
Visualization of these three networks can be done quickly using the
functions in topotools. Firstly, merge them all into a single network
for convenience:
>>> bcc['pore.coords'][:, 0] += 3
>>> fcc['pore.coords'][:, 0] += 6
>>> op.topotools.merge_networks(sc, [bcc, fcc])
>>> fig = op.topotools.plot_connections(sc)
.. image:: /../docs/static/images/bravais_networks.png
:align: center
For larger networks and more control over presentation use `Paraview
<http://www.paraview.org>`_.
"""
def __init__(self, shape, mode, spacing=1, **kwargs):
super().__init__(**kwargs)
shape = np.array(shape)
if np.any(shape < 2):
raise Exception('Bravais lattice networks must have at least 2 '
'pores in all directions')
if mode == 'bcc':
# Make a basic cubic for the corner pores
net1 = Cubic(shape=shape)
net1['pore.net1'] = True
# Create a smaller cubic for the body pores, and shift it
net2 = Cubic(shape=shape-1)
net2['pore.net2'] = True
net2['pore.coords'] += 0.5
# Stitch them together
topotools.stitch(net1, net2, net1.Ps, net2.Ps, len_max=0.99)
self.update(net1)
ws.close_project(net1.project)
# Deal with labels
Ps1 = self['pore.net2']
self.clear(mode='labels')
self['pore.corner_sites'] = ~Ps1
self['pore.body_sites'] = Ps1
Ts = self.find_neighbor_throats(pores=self.pores('body_sites'),
mode='exclusive_or')
self['throat.corner_to_body'] = False
self['throat.corner_to_body'][Ts] = True
Ts = self.find_neighbor_throats(pores=self.pores('corner_sites'),
mode='xnor')
self['throat.corner_to_corner'] = False
self['throat.corner_to_corner'][Ts] = True
Ts = self.find_neighbor_throats(pores=self.pores('body_sites'),
mode='xnor')
self['throat.body_to_body'] = False
self['throat.body_to_body'][Ts] = True
elif mode == 'fcc':
shape = np.array(shape)
# Create base cubic network of corner sites
net1 = Cubic(shape=shape)
# Create 3 networks to become face sites
net2 = Cubic(shape=shape - [1, 1, 0])
net3 = Cubic(shape=shape - [1, 0, 1])
net4 = Cubic(shape=shape - [0, 1, 1])
net2['pore.coords'] += np.array([0.5, 0.5, 0])
net3['pore.coords'] += np.array([0.5, 0, 0.5])
net4['pore.coords'] += np.array([0, 0.5, 0.5])
# Remove throats from nets 2-4 (trim doesn't work when removing ALL)
for n in [net2, net3, net4]:
n.clear(element='throat', mode='all')
n.update({'throat.all': np.array([], dtype=bool)})
n.update({'throat.conns': np.ndarray([0, 2], dtype=bool)})
# Join networks 2, 3 and 4 into one with all face sites
topotools.stitch(net2, net3, net2.Ps, net3.Ps,
len_min=0.70, len_max=0.75)
topotools.stitch(net2, net4, net2.Ps, net4.Ps,
len_min=0.70, len_max=0.75)
# Join face sites network with the corner sites network
topotools.stitch(net1, net2, net1.Ps, net2.Ps,
len_min=0.70, len_max=0.75)
self.update(net1)
ws.close_project(net1.project)
# Deal with labels
self.clear(mode='labels')
Ps = np.any(np.mod(self['pore.coords'], 1) == 0, axis=1)
self['pore.face_sites'] = Ps
self['pore.corner_sites'] = ~Ps
Ts = self.find_neighbor_throats(pores=self.pores('corner_sites'),
mode='xnor')
self['throat.corner_to_corner'] = False
self['throat.corner_to_corner'][Ts] = True
Ts = self.find_neighbor_throats(pores=self.pores('face_sites'))
self['throat.corner_to_face'] = False
self['throat.corner_to_face'][Ts] = True
elif mode == 'hcp':
raise NotImplementedError('hcp is not implemented yet')
elif mode == 'sc':
net = Cubic(shape=shape, spacing=1)
self.update(net)
ws.close_project(net.project)
self.clear(mode='labels')
self['pore.corner_sites'] = True
self['throat.corner_to_corner'] = True
else:
raise Exception('Unrecognized lattice type: ' + mode)
# Finally scale network to specified spacing
topotools.label_faces(self)
Ps = self.pores(['left', 'right', 'top', 'bottom', 'front', 'back'])
Ps = self.tomask(pores=Ps)
self['pore.surface'] = Ps
self['pore.internal'] = ~Ps
self['pore.coords'] *= np.array(spacing)
def add_boundary_pores(self, labels, spacing):
r"""
Add boundary pores to the specified faces of the network
Pores are offset from the faces by 1/2 of the given ``spacing``, such
that they lie directly on the boundaries.
Parameters
----------
labels : string or list of strings
The labels indicating the pores defining each face where boundary
pores are to be added (e.g. 'left' or ['left', 'right'])
spacing : scalar or array_like
The spacing of the network (e.g. [1, 1, 1]). This must be given
since it can be quite difficult to infer from the network,
for instance if boundary pores have already been added to other faces.
"""
spacing = np.array(spacing)
if spacing.size == 1:
spacing = np.ones(3)*spacing
for item in labels:
Ps = self.pores(item)
coords = np.absolute(self['pore.coords'][Ps])
axis = np.count_nonzero(np.diff(coords, axis=0), axis=0) == 0
offset = np.array(axis, dtype=int)/2
if np.amin(coords) == np.amin(coords[:, np.where(axis)[0]]):
offset = -1*offset
topotools.add_boundary_pores(network=self, pores=Ps, offset=offset,
apply_label=item + '_boundary')
|
TomTranter/OpenPNM
|
openpnm/network/Bravais.py
|
Python
|
mit
| 9,623
|
[
"CRYSTAL",
"ParaView"
] |
e8ff1d61d09dbfa7356a2d9019df7aeeceeb8adaae8090b59a1e48cd6243246a
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import os.path
import re
import sys
import warnings
from collections import defaultdict
try:
from setuptools import setup, find_packages
from setuptools.command.build_py import build_py as BuildPy
from setuptools.command.install_lib import install_lib as InstallLib
from setuptools.command.install_scripts import install_scripts as InstallScripts
except ImportError:
print("Ansible now needs setuptools in order to build. Install it using"
" your package manager (usually python-setuptools) or via pip (pip"
" install setuptools).", file=sys.stderr)
sys.exit(1)
# `distutils` must be imported after `setuptools` or it will cause explosions
# with `setuptools >=48.0.0, <49.1`.
# Refs:
# * https://github.com/ansible/ansible/issues/70456
# * https://github.com/pypa/setuptools/issues/2230
# * https://github.com/pypa/setuptools/commit/bd110264
from distutils.command.build_scripts import build_scripts as BuildScripts
from distutils.command.sdist import sdist as SDist
def find_package_info(*file_paths):
try:
with open(os.path.join(*file_paths), 'r') as f:
info_file = f.read()
except Exception:
raise RuntimeError("Unable to find package info.")
# The version line must have the form
# __version__ = 'ver'
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
info_file, re.M)
author_match = re.search(r"^__author__ = ['\"]([^'\"]*)['\"]",
info_file, re.M)
if version_match and author_match:
return version_match.group(1), author_match.group(1)
raise RuntimeError("Unable to find package info.")
def _validate_install_ansible_core():
"""Validate that we can install ansible-core. This checks if
ansible<=2.9 or ansible-base>=2.10 are installed.
"""
# Skip common commands we can ignore
# Do NOT add bdist_wheel here, we don't ship wheels
# and bdist_wheel is the only place we can prevent pip
# from installing, as pip creates a wheel, and installs the wheel
# and we have no influence over installation within a wheel
if set(('sdist', 'egg_info')).intersection(sys.argv):
return
if os.getenv('ANSIBLE_SKIP_CONFLICT_CHECK', '') not in ('', '0'):
return
# Save these for later restoring things to pre invocation
sys_modules = sys.modules.copy()
sys_modules_keys = set(sys_modules)
# Make sure `lib` isn't in `sys.path` that could confuse this
sys_path = sys.path[:]
abspath = os.path.abspath
sys.path[:] = [p for p in sys.path if abspath(p) != abspath('lib')]
try:
from ansible.release import __version__
except ImportError:
pass
else:
version_tuple = tuple(int(v) for v in __version__.split('.')[:2])
if version_tuple >= (2, 11):
return
elif version_tuple == (2, 10):
ansible_name = 'ansible-base'
else:
ansible_name = 'ansible'
stars = '*' * 76
raise RuntimeError(
'''
%s
Cannot install ansible-core with a pre-existing %s==%s
installation.
Installing ansible-core with ansible-2.9 or older, or ansible-base-2.10
currently installed with pip is known to cause problems. Please uninstall
%s and install the new version:
pip uninstall %s
pip install ansible-core
If you want to skip the conflict checks and manually resolve any issues
afterwards, set the ANSIBLE_SKIP_CONFLICT_CHECK environment variable:
ANSIBLE_SKIP_CONFLICT_CHECK=1 pip install ansible-core
%s
''' % (stars, ansible_name, __version__, ansible_name, ansible_name, stars))
finally:
sys.path[:] = sys_path
for key in sys_modules_keys.symmetric_difference(sys.modules):
sys.modules.pop(key, None)
sys.modules.update(sys_modules)
_validate_install_ansible_core()
SYMLINK_CACHE = 'SYMLINK_CACHE.json'
def _find_symlinks(topdir, extension=''):
"""Find symlinks that should be maintained
Maintained symlinks exist in the bin dir or are modules which have
aliases. Our heuristic is that they are a link in a certain path which
point to a file in the same directory.
.. warn::
We want the symlinks in :file:`bin/` that link into :file:`lib/ansible/*` (currently,
:command:`ansible`, :command:`ansible-test`, and :command:`ansible-connection`) to become
real files on install. Updates to the heuristic here *must not* add them to the symlink
cache.
"""
symlinks = defaultdict(list)
for base_path, dirs, files in os.walk(topdir):
for filename in files:
filepath = os.path.join(base_path, filename)
if os.path.islink(filepath) and filename.endswith(extension):
target = os.readlink(filepath)
if target.startswith('/'):
# We do not support absolute symlinks at all
continue
if os.path.dirname(target) == '':
link = filepath[len(topdir):]
if link.startswith('/'):
link = link[1:]
symlinks[os.path.basename(target)].append(link)
else:
# Count how many directory levels from the topdir we are
levels_deep = os.path.dirname(filepath).count('/')
# Count the number of directory levels higher we walk up the tree in target
target_depth = 0
for path_component in target.split('/'):
if path_component == '..':
target_depth += 1
# If we walk past the topdir, then don't store
if target_depth >= levels_deep:
break
else:
target_depth -= 1
else:
# If we managed to stay within the tree, store the symlink
link = filepath[len(topdir):]
if link.startswith('/'):
link = link[1:]
symlinks[target].append(link)
return symlinks
def _cache_symlinks(symlink_data):
with open(SYMLINK_CACHE, 'w') as f:
json.dump(symlink_data, f)
def _maintain_symlinks(symlink_type, base_path):
"""Switch a real file into a symlink"""
try:
# Try the cache first because going from git checkout to sdist is the
# only time we know that we're going to cache correctly
with open(SYMLINK_CACHE, 'r') as f:
symlink_data = json.load(f)
except (IOError, OSError) as e:
# IOError on py2, OSError on py3. Both have errno
if e.errno == 2:
# SYMLINKS_CACHE doesn't exist. Fallback to trying to create the
# cache now. Will work if we're running directly from a git
# checkout or from an sdist created earlier.
library_symlinks = _find_symlinks('lib', '.py')
library_symlinks.update(_find_symlinks('test/lib'))
symlink_data = {'script': _find_symlinks('bin'),
'library': library_symlinks,
}
# Sanity check that something we know should be a symlink was
# found. We'll take that to mean that the current directory
# structure properly reflects symlinks in the git repo
if 'ansible-playbook' in symlink_data['script']['ansible']:
_cache_symlinks(symlink_data)
else:
raise RuntimeError(
"Pregenerated symlink list was not present and expected "
"symlinks in ./bin were missing or broken. "
"Perhaps this isn't a git checkout?"
)
else:
raise
symlinks = symlink_data[symlink_type]
for source in symlinks:
for dest in symlinks[source]:
dest_path = os.path.join(base_path, dest)
if not os.path.islink(dest_path):
try:
os.unlink(dest_path)
except OSError as e:
if e.errno == 2:
# File does not exist which is all we wanted
pass
os.symlink(source, dest_path)
class BuildPyCommand(BuildPy):
def run(self):
BuildPy.run(self)
_maintain_symlinks('library', self.build_lib)
class BuildScriptsCommand(BuildScripts):
def run(self):
BuildScripts.run(self)
_maintain_symlinks('script', self.build_dir)
class InstallLibCommand(InstallLib):
def run(self):
InstallLib.run(self)
_maintain_symlinks('library', self.install_dir)
class InstallScriptsCommand(InstallScripts):
def run(self):
InstallScripts.run(self)
_maintain_symlinks('script', self.install_dir)
class SDistCommand(SDist):
def run(self):
# have to generate the cache of symlinks for release as sdist is the
# only command that has access to symlinks from the git repo
library_symlinks = _find_symlinks('lib', '.py')
library_symlinks.update(_find_symlinks('test/lib'))
symlinks = {'script': _find_symlinks('bin'),
'library': library_symlinks,
}
_cache_symlinks(symlinks)
SDist.run(self)
# Print warnings at the end because no one will see warnings before all the normal status
# output
if os.environ.get('_ANSIBLE_SDIST_FROM_MAKEFILE', False) != '1':
warnings.warn('When setup.py sdist is run from outside of the Makefile,'
' the generated tarball may be incomplete. Use `make snapshot`'
' to create a tarball from an arbitrary checkout or use'
' `cd packaging/release && make release version=[..]` for official builds.',
RuntimeWarning)
def read_file(file_name):
"""Read file and return its contents."""
with open(file_name, 'r') as f:
return f.read()
def read_requirements(file_name):
"""Read requirements file as a list."""
reqs = read_file(file_name).splitlines()
if not reqs:
raise RuntimeError(
"Unable to read requirements from the %s file"
"That indicates this copy of the source code is incomplete."
% file_name
)
return reqs
PYCRYPTO_DIST = 'pycrypto'
def get_crypto_req():
"""Detect custom crypto from ANSIBLE_CRYPTO_BACKEND env var.
pycrypto or cryptography. We choose a default but allow the user to
override it. This translates into pip install of the sdist deciding what
package to install and also the runtime dependencies that pkg_resources
knows about.
"""
crypto_backend = os.environ.get('ANSIBLE_CRYPTO_BACKEND', '').strip()
if crypto_backend == PYCRYPTO_DIST:
# Attempt to set version requirements
return '%s >= 2.6' % PYCRYPTO_DIST
return crypto_backend or None
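# Illustrative note (added commentary): with ANSIBLE_CRYPTO_BACKEND=pycrypto in the
# environment, get_crypto_req() returns 'pycrypto >= 2.6'; any other non-empty value is
# passed through verbatim; with the variable unset or empty it returns None and the
# requirements file is used unchanged by substitute_crypto_to_req().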
def substitute_crypto_to_req(req):
"""Replace crypto requirements if customized."""
crypto_backend = get_crypto_req()
if crypto_backend is None:
return req
def is_not_crypto(r):
CRYPTO_LIBS = PYCRYPTO_DIST, 'cryptography'
return not any(r.lower().startswith(c) for c in CRYPTO_LIBS)
return [r for r in req if is_not_crypto(r)] + [crypto_backend]
def get_dynamic_setup_params():
"""Add dynamically calculated setup params to static ones."""
return {
# Retrieve the long description from the README
'long_description': read_file('README.rst'),
'install_requires': substitute_crypto_to_req(
read_requirements('requirements.txt'),
),
}
here = os.path.abspath(os.path.dirname(__file__))
__version__, __author__ = find_package_info(here, 'lib', 'ansible', 'release.py')
static_setup_params = dict(
# Use the distutils SDist so that symlinks are not expanded
# Use a custom Build for the same reason
cmdclass={
'build_py': BuildPyCommand,
'build_scripts': BuildScriptsCommand,
'install_lib': InstallLibCommand,
'install_scripts': InstallScriptsCommand,
'sdist': SDistCommand,
},
name='ansible-core',
version=__version__,
description='Radically simple IT automation',
author=__author__,
author_email='info@ansible.com',
url='https://ansible.com/',
project_urls={
'Bug Tracker': 'https://github.com/ansible/ansible/issues',
'CI: Shippable': 'https://app.shippable.com/github/ansible/ansible',
'Code of Conduct': 'https://docs.ansible.com/ansible/latest/community/code_of_conduct.html',
'Documentation': 'https://docs.ansible.com/ansible/',
'Mailing lists': 'https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information',
'Source Code': 'https://github.com/ansible/ansible',
},
license='GPLv3+',
# Ansible will also make use of a system copy of python-six and
# python-selectors2 if installed but use a Bundled copy if it's not.
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
package_dir={'': 'lib',
'ansible_test': 'test/lib/ansible_test'},
packages=find_packages('lib') + find_packages('test/lib'),
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Natural Language :: English',
'Operating System :: POSIX',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
scripts=[
'bin/ansible',
'bin/ansible-playbook',
'bin/ansible-pull',
'bin/ansible-doc',
'bin/ansible-galaxy',
'bin/ansible-console',
'bin/ansible-connection',
'bin/ansible-vault',
'bin/ansible-config',
'bin/ansible-inventory',
'bin/ansible-test',
],
data_files=[],
# Installing as zip files would break due to references to __file__
zip_safe=False
)
def main():
"""Invoke installation process using setuptools."""
setup_params = dict(static_setup_params, **get_dynamic_setup_params())
ignore_warning_regex = (
r"Unknown distribution option: '(project_urls|python_requires)'"
)
warnings.filterwarnings(
'ignore',
message=ignore_warning_regex,
category=UserWarning,
module='distutils.dist',
)
setup(**setup_params)
warnings.resetwarnings()
if __name__ == '__main__':
main()
|
Fale/ansible
|
setup.py
|
Python
|
gpl-3.0
| 15,580
|
[
"Galaxy"
] |
d5abb02396368d644a242e65dfa64224a163f33e2392b2711a5bf62c129b011a
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import tempfile
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.io.abinit.inputs import (
BasicAbinitInput,
BasicMultiDataset,
ShiftMode,
calc_shiftk,
ebands_input,
gs_input,
ion_ioncell_relax_input,
num_valence_electrons,
)
from pymatgen.util.testing import PymatgenTest
_test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "abinit")
def abiref_file(filename):
"""Return absolute path to filename in ~pymatgen/test_files/abinit"""
return os.path.join(_test_dir, filename)
def abiref_files(*filenames):
"""Return list of absolute paths to filenames in ~pymatgen/test_files/abinit"""
return [os.path.join(_test_dir, f) for f in filenames]
class AbinitInputTestCase(PymatgenTest):
"""Unit tests for BasicAbinitInput."""
def test_api(self):
"""Testing BasicAbinitInput API."""
# Build simple input with structure and pseudos
unit_cell = {
"acell": 3 * [10.217],
"rprim": [[0.0, 0.5, 0.5], [0.5, 0.0, 0.5], [0.5, 0.5, 0.0]],
"ntypat": 1,
"znucl": [14],
"natom": 2,
"typat": [1, 1],
"xred": [[0.0, 0.0, 0.0], [0.25, 0.25, 0.25]],
}
inp = BasicAbinitInput(structure=unit_cell, pseudos=abiref_file("14si.pspnc"))
shiftk = [[0.5, 0.5, 0.5], [0.5, 0.0, 0.0], [0.0, 0.5, 0.0], [0.0, 0.0, 0.5]]
self.assertArrayEqual(calc_shiftk(inp.structure), shiftk)
assert num_valence_electrons(inp.structure, inp.pseudos) == 8
repr(inp), str(inp)
assert len(inp) == 0 and not inp
assert inp.get("foo", "bar") == "bar" and inp.pop("foo", "bar") == "bar"
assert inp.comment is None
inp.set_comment("This is a comment")
assert inp.comment == "This is a comment"
assert inp.isnc and not inp.ispaw
inp["ecut"] = 1
assert inp.get("ecut") == 1 and len(inp) == 1 and "ecut" in inp.keys() and "foo" not in inp
# Test to_string
assert inp.to_string(with_structure=True, with_pseudos=True)
assert inp.to_string(with_structure=False, with_pseudos=False)
inp.set_vars(ecut=5, toldfe=1e-6)
assert inp["ecut"] == 5
inp.set_vars_ifnotin(ecut=-10)
assert inp["ecut"] == 5
_, tmpname = tempfile.mkstemp(text=True)
inp.write(filepath=tmpname)
# Cannot change structure variables directly.
with self.assertRaises(inp.Error):
inp.set_vars(unit_cell)
with self.assertRaises(TypeError):
inp.add_abiobjects({})
with self.assertRaises(KeyError):
inp.remove_vars("foo", strict=True)
assert not inp.remove_vars("foo", strict=False)
# Test deepcopy and remove_vars.
inp["bdgw"] = [1, 2]
inp_copy = inp.deepcopy()
inp_copy["bdgw"][1] = 3
assert inp["bdgw"] == [1, 2]
assert inp.remove_vars("bdgw") and "bdgw" not in inp
removed = inp.pop_tolerances()
assert len(removed) == 1 and removed["toldfe"] == 1e-6
# Test set_spin_mode
old_vars = inp.set_spin_mode("polarized")
assert "nsppol" in inp and inp["nspden"] == 2 and inp["nspinor"] == 1
inp.set_vars(old_vars)
# Test set_structure
new_structure = inp.structure.copy()
new_structure.perturb(distance=0.1)
inp.set_structure(new_structure)
assert inp.structure == new_structure
# Compatible with Pickle and MSONable?
self.serialize_with_pickle(inp, test_eq=False)
def test_input_errors(self):
"""Testing typical BasicAbinitInput Error"""
si_structure = Structure.from_file(abiref_file("si.cif"))
# Ambiguous list of pseudos.
with self.assertRaises(BasicAbinitInput.Error):
BasicAbinitInput(si_structure, pseudos=abiref_files("14si.pspnc", "14si.4.hgh"))
# Pseudos do not match structure.
with self.assertRaises(BasicAbinitInput.Error):
BasicAbinitInput(si_structure, pseudos=abiref_file("H-wdr.oncvpsp"))
si1_negative_volume = dict(
ntypat=1,
natom=1,
typat=[1],
znucl=14,
acell=3 * [7.60],
rprim=[[0.0, 0.5, 0.5], [-0.5, -0.0, -0.5], [0.5, 0.5, 0.0]],
xred=[[0.0, 0.0, 0.0]],
)
# Negative triple product.
with self.assertRaises(BasicAbinitInput.Error):
BasicAbinitInput(si1_negative_volume, pseudos=abiref_files("14si.pspnc"))
def test_helper_functions(self):
"""Testing BasicAbinitInput helper functions."""
inp = BasicAbinitInput(structure=abiref_file("si.cif"), pseudos="14si.pspnc", pseudo_dir=_test_dir)
inp.set_kmesh(ngkpt=(1, 2, 3), shiftk=(1, 2, 3, 4, 5, 6))
assert inp["kptopt"] == 1 and inp["nshiftk"] == 2
inp.set_gamma_sampling()
assert inp["kptopt"] == 1 and inp["nshiftk"] == 1
assert np.all(inp["shiftk"] == 0)
inp.set_kpath(ndivsm=3, kptbounds=None)
assert inp["ndivsm"] == 3 and inp["iscf"] == -2 and len(inp["kptbounds"]) == 12
class TestMultiDataset(PymatgenTest):
"""Unit tests for BasicMultiDataset."""
def test_api(self):
"""Testing BasicMultiDataset API."""
structure = Structure.from_file(abiref_file("si.cif"))
pseudo = abiref_file("14si.pspnc")
pseudo_dir = os.path.dirname(pseudo)
multi = BasicMultiDataset(structure=structure, pseudos=pseudo)
with self.assertRaises(ValueError):
BasicMultiDataset(structure=structure, pseudos=pseudo, ndtset=-1)
multi = BasicMultiDataset(structure=structure, pseudos=pseudo, pseudo_dir=pseudo_dir)
assert len(multi) == 1 and multi.ndtset == 1
assert multi.isnc
for i, inp in enumerate(multi):
assert list(inp.keys()) == list(multi[i].keys())
multi.addnew_from(0)
assert multi.ndtset == 2 and multi[0] is not multi[1]
assert multi[0].structure == multi[1].structure
assert multi[0].structure is not multi[1].structure
multi.set_vars(ecut=2)
assert all(inp["ecut"] == 2 for inp in multi)
self.assertEqual(multi.get("ecut"), [2, 2])
multi[1].set_vars(ecut=1)
assert multi[0]["ecut"] == 2 and multi[1]["ecut"] == 1
self.assertEqual(multi.get("ecut"), [2, 1])
self.assertEqual(multi.get("foo", "default"), ["default", "default"])
multi[1].set_vars(paral_kgb=1)
assert "paral_kgb" not in multi[0]
self.assertEqual(multi.get("paral_kgb"), [None, 1])
pert_structure = structure.copy()
pert_structure.perturb(distance=0.1)
assert structure != pert_structure
assert multi.set_structure(structure) == multi.ndtset * [structure]
assert all(s == structure for s in multi.structure)
assert multi.has_same_structures
multi[1].set_structure(pert_structure)
assert multi[0].structure != multi[1].structure and multi[1].structure == pert_structure
assert not multi.has_same_structures
split = multi.split_datasets()
assert len(split) == 2 and all(split[i] == multi[i] for i in range(multi.ndtset))
repr(multi)
str(multi)
assert multi.to_string(with_pseudos=False)
tmpdir = tempfile.mkdtemp()
filepath = os.path.join(tmpdir, "run.abi")
inp.write(filepath=filepath)
multi.write(filepath=filepath)
new_multi = BasicMultiDataset.from_inputs([inp for inp in multi])
assert new_multi.ndtset == multi.ndtset
assert new_multi.structure == multi.structure
for old_inp, new_inp in zip(multi, new_multi):
assert old_inp is not new_inp
self.assertDictEqual(old_inp.as_dict(), new_inp.as_dict())
ref_input = multi[0]
new_multi = BasicMultiDataset.replicate_input(input=ref_input, ndtset=4)
assert new_multi.ndtset == 4
for inp in new_multi:
assert ref_input is not inp
self.assertDictEqual(ref_input.as_dict(), inp.as_dict())
# Compatible with Pickle and MSONable?
self.serialize_with_pickle(multi, test_eq=False)
class ShiftModeTest(PymatgenTest):
def test_shiftmode(self):
"""Testing shiftmode"""
gamma = ShiftMode.GammaCentered
assert ShiftMode.from_object("G") == gamma
assert ShiftMode.from_object(gamma) == gamma
with self.assertRaises(TypeError):
ShiftMode.from_object({})
class FactoryTest(PymatgenTest):
def setUp(self):
# Si ebands
self.si_structure = Structure.from_file(abiref_file("si.cif"))
self.si_pseudo = abiref_file("14si.pspnc")
def test_gs_input(self):
"""Testing gs_input factory."""
inp = gs_input(self.si_structure, self.si_pseudo, kppa=10, ecut=10, spin_mode="polarized")
str(inp)
assert inp["nsppol"] == 2
assert inp["nband"] == 14
self.assertArrayEqual(inp["ngkpt"], [2, 2, 2])
def test_ebands_input(self):
"""Testing ebands_input factory."""
multi = ebands_input(self.si_structure, self.si_pseudo, kppa=10, ecut=2)
str(multi)
scf_inp, nscf_inp = multi.split_datasets()
# Test dos_kppa and other options.
multi_dos = ebands_input(
self.si_structure,
self.si_pseudo,
nscf_nband=10,
kppa=10,
ecut=2,
spin_mode="unpolarized",
smearing=None,
charge=2.0,
dos_kppa=50,
)
assert len(multi_dos) == 3
assert all(i["charge"] == 2 for i in multi_dos)
self.assertEqual(multi_dos.get("nsppol"), [1, 1, 1])
self.assertEqual(multi_dos.get("iscf"), [None, -2, -2])
multi_dos = ebands_input(
self.si_structure,
self.si_pseudo,
nscf_nband=10,
kppa=10,
ecut=2,
spin_mode="unpolarized",
smearing=None,
charge=2.0,
dos_kppa=[50, 100],
)
assert len(multi_dos) == 4
self.assertEqual(multi_dos.get("iscf"), [None, -2, -2, -2])
str(multi_dos)
def test_ion_ioncell_relax_input(self):
"""Testing ion_ioncell_relax_input factory."""
multi = ion_ioncell_relax_input(self.si_structure, self.si_pseudo, kppa=10, ecut=2)
str(multi)
ion_inp, ioncell_inp = multi.split_datasets()
assert ion_inp["chksymbreak"] == 0
assert ion_inp["ionmov"] == 3 and ion_inp["optcell"] == 0
assert ioncell_inp["ionmov"] == 3 and ioncell_inp["optcell"] == 2
|
materialsproject/pymatgen
|
pymatgen/io/abinit/tests/test_inputs.py
|
Python
|
mit
| 10,855
|
[
"ABINIT",
"pymatgen"
] |
747285d6d0354463ef17b21d6d7f4d12be5e8be1b292711c803a33ae86c0d271
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from os import system
try:
import pygimli as pg
except ImportError:
sys.stderr.write('ERROR: cannot import the library pygimli.' +
'Ensure that pygimli is in your PYTHONPATH')
sys.exit(1)
def createCoarsePoly(coarseData):
boundary = 1250.0
mesh = pg.Mesh()
x = pg.x(coarseData)
y = pg.y(coarseData)
z = pg.z(coarseData)
xMin, xMax = min(x), max(x)
yMin, yMax = min(y), max(y)
zMin, zMax = min(z), max(z)
print(xMin, xMax, yMin, yMax)
border = max((xMax - xMin) * boundary, (yMax - yMin) * boundary) / 100.
n1 = mesh.createNode(xMin - border, yMin - border, zMin, 1)
n2 = mesh.createNode(xMax + border, yMin - border, zMin, 2)
n3 = mesh.createNode(xMax + border, yMax + border, zMin, 3)
n4 = mesh.createNode(xMin - border, yMax + border, zMin, 4)
mesh.createEdge(n1, n2, 12)
mesh.createEdge(n2, n3, 23)
mesh.createEdge(n3, n4, 34)
mesh.createEdge(n4, n1, 41)
for p in coarseData:
mesh.createNode(p)
return mesh
def createFinePoly(coarseMesh, ePos):
paraBoundary = 10
mesh = pg.Mesh()
n1, n2, n3, n4 = None, None, None, None
for n in coarseMesh.nodes():
if n.marker() == 1:
n1 = mesh.createNode(n.pos(), 1)
elif n.marker() == 2:
n2 = mesh.createNode(n.pos(), 2)
elif n.marker() == 3:
n3 = mesh.createNode(n.pos(), 3)
elif n.marker() == 4:
n4 = mesh.createNode(n.pos(), 4)
mesh.createEdge(n1, n2, 12)
mesh.createEdge(n2, n3, 23)
mesh.createEdge(n3, n4, 34)
mesh.createEdge(n4, n1, 41)
x = pg.x(ePos)
y = pg.y(ePos)
z = pg.z(ePos)
xMin, xMax = min(x), max(x)
yMin, yMax = min(y), max(y)
zMin, zMax = min(z), max(z)
maxSpan = max(xMax - xMin, yMax - yMin)
borderPara = maxSpan * paraBoundary / 100.0
n5 = mesh.createNode(xMin - borderPara, yMin - borderPara, 0.0, 5)
n6 = mesh.createNode(xMax + borderPara, yMin - borderPara, 0.0, 6)
n7 = mesh.createNode(xMax + borderPara, yMax + borderPara, 0.0, 7)
n8 = mesh.createNode(xMin - borderPara, yMax + borderPara, 0.0, 8)
mesh.createEdge(n5, n6, 56)
mesh.createEdge(n6, n7, 67)
mesh.createEdge(n7, n8, 78)
mesh.createEdge(n8, n5, 85)
for p in ePos:
mesh.createNode(p)
return mesh
def main(argv):
from optparse import OptionParser
parser = OptionParser("usage: %prog [options] data|topo-xyz-list")
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
help="be verbose", default=False)
(options, args) = parser.parse_args()
print(options, args)
if len(args) == 0:
parser.print_help()
print("Please add a mesh or model name.")
sys.exit(2)
else:
datafile = args[0]
topoList = None
try:
data = pg.DataContainer(datafile)
print(data)
topoList = data.electrodePositions()
except:
topoList = pg.loadRVector3(datafile)
localiseOffset = pg.RVector3(308354.26737118, 6008130.1579486, 91.23)
for i, p in enumerate(topoList):
topoList[i] = p - localiseOffset
coarsePoly = createCoarsePoly(topoList)
coarseTopoZ = pg.z(coarsePoly.positions())
tri = pg.TriangleWrapper(coarsePoly)
tri.setSwitches("-pzeAfaq0")
coarseMesh = pg.Mesh()
tri.generate(coarseMesh)
if coarseMesh.nodeCount() == len(coarseTopoZ):
for n in coarseMesh.nodes():
n.pos().setZ(coarseTopoZ[n.id()]);
else:
print(" this should not happen. " + str( coarseMesh.nodeCount() ) +
"/=" + str(len(coarseTopoZ)))
coarsePoly.exportVTK('meshCoarsePoly.vtk')
coarseMesh.exportVTK('meshCoarseMesh.vtk')
finePoly = createFinePoly(coarseMesh, topoList)
tri = pg.TriangleWrapper(finePoly)
tri.setSwitches("-pzeAfaq34")
fineMesh = pg.Mesh()
tri.generate(fineMesh)
finePoly.exportVTK('meshFinePoly.vtk')
fineMesh.exportVTK('meshFineMesh.vtk')
pg.interpolateSurface(coarseMesh, fineMesh)
fineMesh.exportVTK('meshFine.vtk')
fineMesh.exportAsTetgenPolyFile("meshFine.poly")
system('closeSurface -v -z 40.0 -a 1000 -o mesh meshFine.poly')
# system( 'polyAddVIP -f ../../para/all.vip mesh.poly')
translate = 'polyTranslate -x ' + str(localiseOffset[0]) + \
' -y ' + str(localiseOffset[1]) + \
' -z ' + str(localiseOffset[2]) + ' mesh.poly'
system(translate)
#fineMesh.exportAsTetgenPolyFile( "meshFine.poly" );
if __name__ == "__main__":
main(sys.argv[1:])
|
florian-wagner/gimli
|
python/apps/pycreatesurface.py
|
Python
|
gpl-3.0
| 4,685
|
[
"VTK"
] |
e7698abfc341632630b5d0ea8f0e7e7ba8c505780193f3466129b0d7efe6ad00
|
import pynet,netext,percolator
import random
import numpy as np
def mst(net,maximum=False):
"""Find a minimum/maximum spanning tree
"""
return mst_kruskal(net,True,maximum)
def mst_kruskal(net,randomize=True,maximum=False):
"""Find a minimum/maximum spanning tree using Kruskal's algorithm
If randomize is set to True and the mst is not unique, a random
mst is chosen.
>>> t=pynet.SymmNet()
>>> t[1,2]=1
>>> t[2,3]=2
>>> t[3,1]=3
>>> m=mst_kruskal(t)
>>> print m.edges
[[1, 2, 1], [2, 3, 2]]
"""
edges=list(net.edges)
if randomize:
random.shuffle(edges) #the sort has been stable since python version 2.3
edges.sort(key=lambda x: x[2], reverse=maximum)
mst=pynet.SymmNet()
numberOfNodes=len(net)
#ktree=percolator.Ktree(numberOfNodes)
ktree=percolator.Ktree() #just use dict
addedEdges=0
for edge in edges:
if ktree.getParent(edge[0])!=ktree.getParent(edge[1]):
mst[edge[0],edge[1]]=edge[2]
ktree.setParent(edge[0],edge[1])
addedEdges+=1
if addedEdges==numberOfNodes-1:
#the mst is a tree
netext.copyNodeProperties(net,mst)
return mst
# else it is a forest
netext.copyNodeProperties(net,mst)
return mst
def snowball(net, seed, depth, includeLeafEdges=False):
"""Snowball sampling
Works for both directed and undirected networks. For directed
networks all edges all followed during the sampling (as opposed to
following only outbound edges).
Parameters
----------
net : pynet.SymmNet or pynet.Net object
The network to be sampled.
seed : int or a sequence of ints
The seed of the snowball, either a single node index or
several indices.
depth : int
The depth of the snowball. Depth 1 corresponds to first
neighbors of the seed only.
includeLeafEdges : bool (default: False)
If True, then the edges between the leaves (i.e. the nodes at
final depth) will also be included in the snowball network. By
default these edges are not included.
Return
------
snowball : pynet.SymmNet or pynet.Net object
The snowball sample, will be of the same type as `net`.
"""
if isinstance(seed, int):
seed = [seed]
toVisit=set(seed)
# Create a network for the sample with the same type as `net`.
newNet=type(net)()
visited=set()
for d in range(1,depth+1):
#print "Depth: ",d," visited ", len(visited)," to visit ", len(toVisit)
visited=visited|toVisit
newToVisit=set()
if len(toVisit) == 0:
break
for nodeIndex in toVisit:
node = net[nodeIndex]
# Go through outbound edges (this equals all neighbors in
# an undirected network.
for outIndex in node.iterOut():
newNet[nodeIndex][outIndex] = net[nodeIndex][outIndex]
if outIndex not in visited:
newToVisit.add(outIndex)
# If we are dealing with a directed network, then we must
# also go through the inbound edges.
if isinstance(net, pynet.Net):
for inIndex in node.iterIn():
newNet[inIndex][nodeIndex] = net[inIndex][nodeIndex]
if inIndex not in visited:
newToVisit.add(inIndex)
# If this is the last depth and `includeLeafEdges` is
# True, we add the edges between the most recently added
# nodes, that is, those currently in the set `newToVisit`.
if d == depth and includeLeafEdges:
for nodeIndex in newToVisit:
node = net[nodeIndex]
for outIndex in node.iterOut():
if outIndex in newToVisit:
newNet[nodeIndex][outIndex] = net[nodeIndex][outIndex]
if isinstance(net, pynet.Net):
for inIndex in node.iterIn():
if inIndex in newToVisit:
newNet[inIndex][nodeIndex] = net[inIndex][nodeIndex]
# The nodes to be visited on the next round are the leaves
# found in the current round.
toVisit=newToVisit
netext.copyNodeProperties(net,newNet)
return newNet
def collapseIndices(net, returnIndexMap=False):
"""Changes the indices of net to run from 0 to len(net)-1.
"""
newNet = type(net)()
indexmap = {}
index = 0
for i in net:
newNet.addNode(index)
indexmap[i] = index;
index += 1
for edge in net.edges:
i,j,w=edge
newNet[indexmap[i]][indexmap[j]] = w
netext.copyNodeProperties(net,newNet)
if returnIndexMap:
return newNet, indexmap
else:
return newNet
def threshold_by_value(net,threshold,accept="<",keepIsolatedNodes=False):
'''Generates a new network by thresholding the input network.
If using option keepIsolatedNodes=True, all nodes in the
original network will be included in the thresholded network;
otherwise only those nodes which have links will remain (this
is the default).
Inputs: net = network, threshold = threshold value,
accept = "foobar": accept weights foobar threshold (e.g accept = "<": accept weights < threshold)
Returns a network.'''
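# Hedged usage sketch (added for illustration):
# strong = threshold_by_value(net, 0.5, accept=">=", keepIsolatedNodes=True)
# keeps only edges with weight >= 0.5 while retaining every node of the original network.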
newnet=pynet.SymmNet()
edges=list(net.edges)
if accept == "<":
for edge in edges:
if (edge[2] < threshold):
newnet[edge[0],edge[1]]=edge[2]
elif accept == ">":
for edge in edges:
if (edge[2] > threshold):
newnet[edge[0],edge[1]]=edge[2]
elif accept == ">=":
for edge in edges:
if (edge[2] >= threshold):
newnet[edge[0],edge[1]]=edge[2]
elif accept == "<=":
for edge in edges:
if (edge[2] <= threshold):
newnet[edge[0],edge[1]]=edge[2]
else:
raise Exception("Parameter 'accept' must be either '<', '>', '<=' or '>='.")
# Add isolated nodes to the network.
if keepIsolatedNodes==True:
for node in net:
if not newnet.__contains__(node):
newnet.addNode(node)
netext.copyNodeProperties(net,newnet)
return newnet
def dist_to_weights(net,epsilon=0.001):
'''Transforms a distance matrix / network to a weight
matrix / network using the formula W = 1 - D / max(D).
Returns a matrix/network'''
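# Illustrative note (added commentary): with edge distances {2, 4, 8} and epsilon=0.001,
# maxd becomes 8.001 and the resulting weights are roughly 0.75, 0.50 and 0.0001,
# i.e. the largest distance maps to an almost-but-not-quite zero weight.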
N=len(net._nodes)
if (isinstance(net,pynet.SymmFullNet)):
newmat=pynet.SymmFullNet(N)
else:
newmat=pynet.SymmNet()
edges=list(net.edges)
maxd=0.0
for edge in edges:
if edge[2]>maxd:
maxd=edge[2]
# epsilon trick; lowest weight will be almost but
# not entirely zero
maxd=maxd+epsilon
for edge in edges:
if not(edge[2]==maxd):
newmat[edge[0]][edge[1]]=1-edge[2]/maxd
netext.copyNodeProperties(net,newmat)
return newmat
def filterNet(net,keep_these_nodes):
return getSubnet(net,keep_these_nodes)
def getSubnet(net,nodes):
"""Get induced subgraph.
Parameters
----------
net: pynet.Net, pynet.SymmNet or pynet.SymmFullNet
The original network.
nodes : sequence
The nodes that span the induced subgraph.
Return
------
subnet : type(net)
The induced subgraph that contains only nodes given in
`nodes` and the edges between those nodes that are
present in `net`. Node properties etc are left untouched.
"""
# Handle both directed and undirected networks.
newnet = type(net)() # Initialize to same type as `net`.
degsum=0
for node in nodes:
degsum += net[node].deg()
newnet.addNode(node)
if degsum >= len(nodes)*(len(nodes)-1)/2:
othernodes=set(nodes)
for node in nodes:
if net.isSymmetric():
othernodes.remove(node)
for othernode in othernodes:
if net[node,othernode]!=0:
newnet[node,othernode]=net[node,othernode]
else:
for node in nodes:
for neigh in net[node]:
if neigh in nodes:
newnet[node,neigh]=net[node,neigh]
netext.copyNodeProperties(net, newnet)
return newnet
def collapseBipartiteNet(net,nodesToRemove):
"""
Returns a unipartite projection of a bipartite network.
"""
newNet=pynet.SymmNet()
for node in nodesToRemove:
degree=float(net[node].deg())
for node1 in net[node]:
for node2 in net[node]:
if node1.__hash__()>node2.__hash__():
newNet[node1,node2]=newNet[node1,node2]+1.0/degree
netext.copyNodeProperties(net,newNet)
return newNet
def local_threshold_by_value(net,threshold):
'''Generates a new network by thresholding the input network.
Inputs: net = network, threshold = threshold value.
Returns a network. Note! threshold is really the significance level alpha defined in
"Extracting the multiscale backbone of complex weighted networks"
http://www.pnas.org/content/106/16/6483.full.pdf'''
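# Illustrative note (added commentary): for a node with strength s=10 and degree k=5,
# an edge of weight w=6 gives (1 - 6/10)**(5-1) = 0.0256, so that edge is kept
# whenever the threshold (alpha) is larger than about 0.026.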
newnet=pynet.SymmNet()
for node in net:
s=net[node].strength()
k=net[node].deg()
for neigh in net[node]:
w=net[node,neigh]
if (1-w/s)**(k-1)<threshold:
newnet[node,neigh]=w
netext.copyNodeProperties(net,newnet)
return newnet
def getLineGraph(net, useWeights=False, output=None, format='edg'):
"""Return a line graph constructed from `net`.
The nodes in the line graph correspond to edges in the original
graph, and there is an edge between two nodes if they have a
common incident node in the original graph.
If weights are not used (`useWeights = False`), the resulting
network will be undirected and the weight of each new edge will be
1/(k_i-1), where k_i is the degree of the common node in `net`.
If weights are used (`useWeights = True`), the resulting network
will be directed and the weight of edge (e_ij, e_jk) will be
w_jk/sum_{x != i} w_jx, where the indices i, j and k refer to
nodes in `net`.
Parameters
----------
net : pynet.SymmNet object
The original graph that is used for constructing the line
graph.
useWeights : boolean
If True, the edge weights will be used when constructing the
line graph.
output : file object
If given, the edges will be written to output in edg-format
instead of returning a pynet.Net() or pynet.SymmNet() object.
format : str, 'edg' or 'net'
If `output` is specified, `format` specifies how the output is
written. 'edg' is the standard edge format (FROM TO WEIGHT)
and 'net' gives the Pajek format.
Return
------
IF `output` is None:
linegraph : pynet.SymmNet or pynet.Net object
The weighted line graph.
id_array : numpy.array with shape (len(net.edges), 2)
Array for converting the nodes in the line graph back into the
edges of the original graph. id_array[EDGE_ID] contains the
two end nodes of given edge, where EDGE_ID is the same as used
in `linegraph`.
"""
if output is None:
if useWeights:
linegraph = pynet.Net()
else:
linegraph = pynet.SymmNet()
edge_map = dict() # edge_map[sorted([n_i, n_j])] = new_node_ID
if output is not None and format == 'net':
# Print Pajek file header.
N_edges = len(list(net.edges))
output.write("*Vertices %d\n" % N_edges)
for i in range(N_edges):
output.write('%d "%d"\n' % (i, i))
N_edge_links = 0
for n in net:
degree = len(list(net[n]))
N_edge_links += (degree*(degree-1))/2
if useWeights:
output.write("*Arcs %d\n" % (2*N_edge_links,))
else:
output.write("*Edges %d\n" % N_edge_links)
# Go through all nodes (n_c = center node), and for each node, go
# through all pairs of neighbours (n_i and n_j). The edges
# e_i=(n_c,n_i) and e_j=(n_c,n_j) are nodes in the line graph, so
# we add a link between them.
for n_c in net:
strength = net[n_c].strength()
nb = list(net[n_c]) # List of neighbours
for i, n_i in enumerate(nb):
e_i = edge_map.setdefault(tuple(sorted([n_c,n_i])), len(edge_map))
other_nb = (nb[:i]+nb[i+1:] if useWeights else nb[i+1:])
for n_j in other_nb:
e_j = edge_map.setdefault(tuple(sorted([n_c,n_j])), len(edge_map))
if useWeights:
w = net[n_c][n_j]/(strength - net[n_c][n_i])
else:
w = 1.0/(len(nb)-1)
if output is None:
linegraph[e_i][e_j] = w
else:
output.write(" ".join(map(str, [e_i, e_j, w])) + "\n")
# Construct id_array from edge_map
id_array = np.zeros((len(edge_map), 2), int)
for node_pair, edgeID in edge_map.iteritems():
id_array[edgeID] = list(node_pair)
if output is None:
return linegraph, id_array
else:
return id_array
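# Example sketch: build the unweighted line graph of a small star network and
# use id_array to map line-graph nodes back to original edges. The node labels
# are arbitrary illustrative values.
def _example_getLineGraph():
    net = pynet.SymmNet()
    net[0, 1] = 1.0
    net[0, 2] = 1.0
    net[0, 3] = 1.0
    linegraph, id_array = getLineGraph(net)
    # For this star the line graph should be a triangle whose edges all have
    # weight 1/(3-1) = 0.5; id_array[e] gives the two end nodes in `net` of
    # line-graph node e.
    return linegraph, id_array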
def sumNets(nets):
"""Aggregates networks defined in nets (list of SymmNets)
by summing up edge weights between nodes in all nets"""
newNet=pynet.SymmNet()
for currnet in nets:
curr_edges=list(currnet.edges)
for edge in curr_edges:
newNet[edge[0],edge[1]]+=edge[2]
return newNet
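# Example sketch: aggregate two small networks by summing their edge weights.
# The node labels and weights are arbitrary illustrative values.
def _example_sumNets():
    a = pynet.SymmNet()
    a[1, 2] = 1.0
    b = pynet.SymmNet()
    b[1, 2] = 2.0
    b[2, 3] = 0.5
    total = sumNets([a, b])
    # total[1, 2] should equal 3.0 and total[2, 3] should equal 0.5.
    return total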
def netConfiguration(net, keepsOrigNet=False, seed=None):
"""Generate configuration network
This function generates a configuration network from any arbitrary
    net. It retains the degree of each node but randomizes the edges
between them.
Parameters
----------
net : pynet.SymmNet object
The network to be used as the basis for the configuration
model.
keepsOrigNet : bool (default: False)
If False, the input network, `net`, will be overwritten by the
configuration network.
seed : int (default: None)
A seed for the random number generator. If None, the RNG is
        not re-initialized and the current state is used.
Return
------
configuration_net : pynet.SymmNet object
The shuffled network. Note that if `keepsOrigNet` is False,
the returned value will be identical to `net`.
"""
if seed is not None:
random.seed(int(seed))
newNet = pynet.SymmNet()
if keepsOrigNet:
testNet = pynet.SymmNet()
for edge in net.edges:
testNet[edge[0],edge[1]] = edge[2]
else:
testNet=net
edgeList = list(net.edges)
for i in range(len(edgeList)):
j=i
while j==i:
j=random.randint(0,len(edgeList)-1)
if ((edgeList[i][1]==edgeList[j][0])
or (edgeList[j][1]==edgeList[i][0])):
continue
if ((edgeList[i][1]==edgeList[j][0])
and (edgeList[j][1]==edgeList[i][0])):
continue
if ((edgeList[i][0]==edgeList[j][0])
or (edgeList[i][1]==edgeList[j][1])):
continue
if ((newNet[edgeList[i][0],edgeList[j][1]]>0.0)
or (newNet[edgeList[j][0],edgeList[i][1]]>0.0)):
continue
if ((testNet[edgeList[i][0],edgeList[j][1]]>0.0)
or (testNet[edgeList[j][0],edgeList[i][1]]>0.0)):
continue
        # Swap the second endpoints of the two chosen edges with an
        # arithmetic swap (this assumes numeric node identifiers).
        edgeList[i][1] += edgeList[j][1]
        edgeList[j][1] = edgeList[i][1] - edgeList[j][1]
        edgeList[i][1] = edgeList[i][1] - edgeList[j][1]
newNet[edgeList[i][0],edgeList[j][1]]=0.0
newNet[edgeList[j][0],edgeList[i][1]]=0.0
testNet[edgeList[i][0],edgeList[j][1]]=0.0
testNet[edgeList[j][0],edgeList[i][1]]=0.0
newNet[edgeList[i][0],edgeList[i][1]]=1.0
newNet[edgeList[j][0],edgeList[j][1]]=1.0
testNet[edgeList[i][0],edgeList[i][1]]=1.0
testNet[edgeList[j][0],edgeList[j][1]]=1.0
return newNet
if __name__ == '__main__':
"""Run unit tests if called."""
from tests.test_transforms import *
unittest.main()
| jsaramak/ants | transforms.py | Python | gpl-2.0 | 16,559 | ["VisIt"] | cd0405199267f96a1a25e9ee7d2b1ad6db9d5d1d81d70b535f186ef16f4c2dea |
# Copyright (C) 2012 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Convenience functions to genenerate gravitational wave templates and
waveforms.
"""
import lal, lalsimulation, numpy, copy
from pycbc.types import TimeSeries, FrequencySeries, zeros, Array
from pycbc.types import real_same_precision_as, complex_same_precision_as
import pycbc.scheme as _scheme
import inspect
from pycbc.fft import fft
from pycbc import pnutils
from pycbc.waveform import utils as wfutils
from pycbc.waveform import parameters
from pycbc.filter import interpolate_complex_frequency, resample_to_delta_t
import pycbc
from .spa_tmplt import spa_tmplt, spa_tmplt_norm, spa_tmplt_end, \
spa_tmplt_precondition, spa_amplitude_factor, \
spa_length_in_time
from six.moves import range as xrange
class NoWaveformError(Exception):
"""This should be raised if generating a waveform would just result in all
zeros being returned, e.g., if a requested `f_final` is <= `f_lower`.
"""
pass
# If this is set to True, waveform generation codes will try to regenerate
# waveforms with known failure conditions to try to avoid the failure. For
# example SEOBNRv3 waveforms would be regenerated with double the sample rate.
# If this is set to False waveform failures will always raise exceptions
fail_tolerant_waveform_generation = True
default_args = \
(parameters.fd_waveform_params.default_dict() +
parameters.td_waveform_params).default_dict()
default_sgburst_args = {'eccentricity':0, 'polarization':0}
td_required_args = parameters.cbc_td_required
fd_required_args = parameters.cbc_fd_required
sgburst_required_args = ['q','frequency','hrss']
# td, fd, filter waveforms generated on the CPU
_lalsim_td_approximants = {}
_lalsim_fd_approximants = {}
_lalsim_enum = {}
_lalsim_sgburst_approximants = {}
def _check_lal_pars(p):
""" Create a laldict object from the dictionary of waveform parameters
Parameters
----------
p: dictionary
The dictionary of lalsimulation paramaters
Returns
-------
laldict: LalDict
The lal type dictionary to pass to the lalsimulation waveform functions.
"""
lal_pars = lal.CreateDict()
#nonGRparams can be straightforwardly added if needed, however they have to
# be invoked one by one
if p['phase_order']!=-1:
lalsimulation.SimInspiralWaveformParamsInsertPNPhaseOrder(lal_pars,int(p['phase_order']))
if p['amplitude_order']!=-1:
lalsimulation.SimInspiralWaveformParamsInsertPNAmplitudeOrder(lal_pars,int(p['amplitude_order']))
if p['spin_order']!=-1:
lalsimulation.SimInspiralWaveformParamsInsertPNSpinOrder(lal_pars,int(p['spin_order']))
if p['tidal_order']!=-1:
lalsimulation.SimInspiralWaveformParamsInsertPNTidalOrder(lal_pars, p['tidal_order'])
if p['eccentricity_order']!=-1:
lalsimulation.SimInspiralWaveformParamsInsertPNEccentricityOrder(lal_pars, p['eccentricity_order'])
if p['lambda1'] is not None:
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda1(lal_pars, p['lambda1'])
if p['lambda2'] is not None:
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda2(lal_pars, p['lambda2'])
if p['lambda_octu1'] is not None:
lalsimulation.SimInspiralWaveformParamsInsertTidalOctupolarLambda1(lal_pars, p['lambda_octu1'])
if p['lambda_octu2'] is not None:
lalsimulation.SimInspiralWaveformParamsInsertTidalOctupolarLambda2(lal_pars, p['lambda_octu2'])
if p['quadfmode1'] is not None:
lalsimulation.SimInspiralWaveformParamsInsertTidalQuadrupolarFMode1(lal_pars, p['quadfmode1'])
if p['quadfmode2'] is not None:
lalsimulation.SimInspiralWaveformParamsInsertTidalQuadrupolarFMode2(lal_pars, p['quadfmode2'])
if p['octufmode1'] is not None:
lalsimulation.SimInspiralWaveformParamsInsertTidalOctupolarFMode1(lal_pars, p['octufmode1'])
if p['octufmode2'] is not None:
lalsimulation.SimInspiralWaveformParamsInsertTidalOctupolarFMode2(lal_pars, p['octufmode2'])
if p['dquad_mon1'] is not None:
lalsimulation.SimInspiralWaveformParamsInsertdQuadMon1(lal_pars, p['dquad_mon1'])
if p['dquad_mon2'] is not None:
lalsimulation.SimInspiralWaveformParamsInsertdQuadMon2(lal_pars, p['dquad_mon2'])
if p['numrel_data']:
lalsimulation.SimInspiralWaveformParamsInsertNumRelData(lal_pars, str(p['numrel_data']))
if p['modes_choice']:
lalsimulation.SimInspiralWaveformParamsInsertModesChoice(lal_pars, p['modes_choice'])
if p['frame_axis']:
lalsimulation.SimInspiralWaveformParamsInsertFrameAxis(lal_pars, p['frame_axis'])
if p['side_bands']:
lalsimulation.SimInspiralWaveformParamsInsertSideband(lal_pars, p['side_bands'])
if p['mode_array'] is not None:
ma = lalsimulation.SimInspiralCreateModeArray()
for l,m in p['mode_array']:
lalsimulation.SimInspiralModeArrayActivateMode(ma, l, m)
lalsimulation.SimInspiralWaveformParamsInsertModeArray(lal_pars, ma)
return lal_pars
def _lalsim_td_waveform(**p):
fail_tolerant_waveform_generation
lal_pars = _check_lal_pars(p)
#nonGRparams can be straightforwardly added if needed, however they have to
# be invoked one by one
try:
hp1, hc1 = lalsimulation.SimInspiralChooseTDWaveform(
float(pnutils.solar_mass_to_kg(p['mass1'])),
float(pnutils.solar_mass_to_kg(p['mass2'])),
float(p['spin1x']), float(p['spin1y']), float(p['spin1z']),
float(p['spin2x']), float(p['spin2y']), float(p['spin2z']),
pnutils.megaparsecs_to_meters(float(p['distance'])),
float(p['inclination']), float(p['coa_phase']),
float(p['long_asc_nodes']), float(p['eccentricity']), float(p['mean_per_ano']),
float(p['delta_t']), float(p['f_lower']), float(p['f_ref']),
lal_pars,
_lalsim_enum[p['approximant']])
except RuntimeError:
if not fail_tolerant_waveform_generation:
raise
# For some cases failure modes can occur. Here we add waveform-specific
# instructions to try to work with waveforms that are known to fail.
if 'SEOBNRv3' in p['approximant']:
# Try doubling the sample time and redoing.
# Don't want to get stuck in a loop though!
if 'delta_t_orig' not in p:
p['delta_t_orig'] = p['delta_t']
p['delta_t'] = p['delta_t'] / 2.
if p['delta_t_orig'] / p['delta_t'] > 9:
raise
hp, hc = _lalsim_td_waveform(**p)
p['delta_t'] = p['delta_t_orig']
hp = resample_to_delta_t(hp, hp.delta_t*2)
hc = resample_to_delta_t(hc, hc.delta_t*2)
return hp, hc
raise
#lal.DestroyDict(lal_pars)
hp = TimeSeries(hp1.data.data[:], delta_t=hp1.deltaT, epoch=hp1.epoch)
hc = TimeSeries(hc1.data.data[:], delta_t=hc1.deltaT, epoch=hc1.epoch)
return hp, hc
def _spintaylor_aligned_prec_swapper(**p):
"""
    SpinTaylorF2 is a single-spin model, and it also struggles with
    anti-aligned spin waveforms. This wrapper chooses between the aligned
    two-spin TaylorF2 model and the precessing single-spin SpinTaylorF2
    model: if aligned spins are given, TaylorF2 is used; if non-aligned
    spins are given, SpinTaylorF2 is used. In the case of non-aligned
    double-spin systems the code will fail at the waveform generator level.
"""
orig_approximant = p['approximant']
if p['spin2x'] == 0 and p['spin2y'] == 0 and p['spin1x'] == 0 and \
p['spin1y'] == 0:
p['approximant'] = 'TaylorF2'
else:
p['approximant'] = 'SpinTaylorF2'
hp, hc = _lalsim_fd_waveform(**p)
p['approximant'] = orig_approximant
return hp, hc
def _lalsim_fd_waveform(**p):
lal_pars = _check_lal_pars(p)
hp1, hc1 = lalsimulation.SimInspiralChooseFDWaveform(
float(pnutils.solar_mass_to_kg(p['mass1'])),
float(pnutils.solar_mass_to_kg(p['mass2'])),
float(p['spin1x']), float(p['spin1y']), float(p['spin1z']),
float(p['spin2x']), float(p['spin2y']), float(p['spin2z']),
pnutils.megaparsecs_to_meters(float(p['distance'])),
float(p['inclination']), float(p['coa_phase']),
float(p['long_asc_nodes']), float(p['eccentricity']), float(p['mean_per_ano']),
p['delta_f'], float(p['f_lower']), float(p['f_final']), float(p['f_ref']),
lal_pars,
_lalsim_enum[p['approximant']])
hp = FrequencySeries(hp1.data.data[:], delta_f=hp1.deltaF,
epoch=hp1.epoch)
hc = FrequencySeries(hc1.data.data[:], delta_f=hc1.deltaF,
epoch=hc1.epoch)
#lal.DestroyDict(lal_pars)
return hp, hc
def _lalsim_sgburst_waveform(**p):
hp, hc = lalsimulation.SimBurstSineGaussian(float(p['q']),
float(p['frequency']),
float(p['hrss']),
float(p['eccentricity']),
float(p['polarization']),
float(p['delta_t']))
hp = TimeSeries(hp.data.data[:], delta_t=hp.deltaT, epoch=hp.epoch)
hc = TimeSeries(hc.data.data[:], delta_t=hc.deltaT, epoch=hc.epoch)
return hp, hc
for approx_enum in xrange(0, lalsimulation.NumApproximants):
if lalsimulation.SimInspiralImplementedTDApproximants(approx_enum):
approx_name = lalsimulation.GetStringFromApproximant(approx_enum)
_lalsim_enum[approx_name] = approx_enum
_lalsim_td_approximants[approx_name] = _lalsim_td_waveform
for approx_enum in xrange(0, lalsimulation.NumApproximants):
if lalsimulation.SimInspiralImplementedFDApproximants(approx_enum):
approx_name = lalsimulation.GetStringFromApproximant(approx_enum)
_lalsim_enum[approx_name] = approx_enum
_lalsim_fd_approximants[approx_name] = _lalsim_fd_waveform
# sine-Gaussian burst
for approx_enum in xrange(0, lalsimulation.NumApproximants):
if lalsimulation.SimInspiralImplementedFDApproximants(approx_enum):
approx_name = lalsimulation.GetStringFromApproximant(approx_enum)
_lalsim_enum[approx_name] = approx_enum
_lalsim_sgburst_approximants[approx_name] = _lalsim_sgburst_waveform
cpu_sgburst = _lalsim_sgburst_approximants
cpu_td = dict(_lalsim_td_approximants.items())
cpu_fd = _lalsim_fd_approximants
# Waveforms written in CUDA
_cuda_td_approximants = {}
_cuda_fd_approximants = {}
if pycbc.HAVE_CUDA:
from pycbc.waveform.pycbc_phenomC_tmplt import imrphenomc_tmplt
from pycbc.waveform.SpinTaylorF2 import spintaylorf2 as cuda_spintaylorf2
_cuda_fd_approximants["IMRPhenomC"] = imrphenomc_tmplt
_cuda_fd_approximants["SpinTaylorF2"] = cuda_spintaylorf2
cuda_td = dict(list(_lalsim_td_approximants.items()) + list(_cuda_td_approximants.items()))
cuda_fd = dict(list(_lalsim_fd_approximants.items()) + list(_cuda_fd_approximants.items()))
# List the various available approximants ####################################
def print_td_approximants():
print("LalSimulation Approximants")
for approx in _lalsim_td_approximants.keys():
print(" " + approx)
print("CUDA Approximants")
for approx in _cuda_td_approximants.keys():
print(" " + approx)
def print_fd_approximants():
print("LalSimulation Approximants")
for approx in _lalsim_fd_approximants.keys():
print(" " + approx)
print("CUDA Approximants")
for approx in _cuda_fd_approximants.keys():
print(" " + approx)
def print_sgburst_approximants():
print("LalSimulation Approximants")
for approx in _lalsim_sgburst_approximants.keys():
print(" " + approx)
def td_approximants(scheme=_scheme.mgr.state):
"""Return a list containing the available time domain approximants for
the given processing scheme.
"""
return list(td_wav[type(scheme)].keys())
def fd_approximants(scheme=_scheme.mgr.state):
"""Return a list containing the available fourier domain approximants for
the given processing scheme.
"""
return list(fd_wav[type(scheme)].keys())
def sgburst_approximants(scheme=_scheme.mgr.state):
"""Return a list containing the available time domain sgbursts for
the given processing scheme.
"""
return list(sgburst_wav[type(scheme)].keys())
def filter_approximants(scheme=_scheme.mgr.state):
"""Return a list of fourier domain approximants including those
written specifically as templates.
"""
return list(filter_wav[type(scheme)].keys())
# Input parameter handling ###################################################
def get_obj_attrs(obj):
""" Return a dictionary built from the attributes of the given object.
"""
pr = {}
if obj is not None:
if isinstance(obj, numpy.core.records.record):
for name in obj.dtype.names:
pr[name] = getattr(obj, name)
elif hasattr(obj, '__dict__') and obj.__dict__:
pr = obj.__dict__
elif hasattr(obj, '__slots__'):
for slot in obj.__slots__:
if hasattr(obj, slot):
pr[slot] = getattr(obj, slot)
elif isinstance(obj, dict):
pr = obj.copy()
else:
for name in dir(obj):
try:
value = getattr(obj, name)
if not name.startswith('__') and not inspect.ismethod(value):
pr[name] = value
except:
continue
return pr
def props(obj, required_args=None, **kwargs):
""" Return a dictionary built from the combination of defaults, kwargs,
and the attributes of the given object.
"""
pr = get_obj_attrs(obj)
pr.update(kwargs)
if required_args is None:
required_args = []
# check that required args are given
missing = set(required_args) - set(pr.keys())
if any(missing):
raise ValueError("Please provide {}".format(', '.join(missing)))
# Get the parameters to generate the waveform
# Note that keyword arguments override values in the template object
input_params = default_args.copy()
input_params.update(pr)
return input_params
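# Example sketch: keyword arguments passed to props() override attributes of
# the template object, and anything not supplied falls back to default_args.
# The template class and the parameter values are arbitrary illustrative
# assumptions.
class _ExampleTemplate(object):
    def __init__(self):
        self.mass1 = 1.4
        self.mass2 = 1.4
def _example_props():
    p = props(_ExampleTemplate(), mass2=10.0, approximant='TaylorF2')
    # p['mass1'] == 1.4 comes from the template, p['mass2'] == 10.0 from the
    # keyword override, and the remaining entries from default_args.
    return p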
# Input parameter handling for bursts ########################################
def props_sgburst(obj, **kwargs):
pr = {}
if obj is not None:
for name in dir(obj):
try:
value = getattr(obj, name)
if not name.startswith('__') and not inspect.ismethod(value):
pr[name] = value
except:
continue
# Get the parameters to generate the waveform
# Note that keyword arguments override values in the template object
input_params = default_sgburst_args.copy()
input_params.update(pr)
input_params.update(kwargs)
return input_params
# Waveform generation ########################################################
def get_fd_waveform_sequence(template=None, **kwds):
"""Return values of the waveform evaluated at the sequence of frequency
points.
Parameters
----------
template: object
An object that has attached properties. This can be used to substitute
for keyword arguments. A common example would be a row in an xml table.
{params}
Returns
-------
hplustilde: Array
The plus phase of the waveform in frequency domain evaluated at the
frequency points.
hcrosstilde: Array
The cross phase of the waveform in frequency domain evaluated at the
frequency points.
"""
kwds['delta_f'] = -1
kwds['f_lower'] = -1
p = props(template, required_args=fd_required_args, **kwds)
lal_pars = _check_lal_pars(p)
hp, hc = lalsimulation.SimInspiralChooseFDWaveformSequence(float(p['coa_phase']),
float(pnutils.solar_mass_to_kg(p['mass1'])),
float(pnutils.solar_mass_to_kg(p['mass2'])),
float(p['spin1x']), float(p['spin1y']), float(p['spin1z']),
float(p['spin2x']), float(p['spin2y']), float(p['spin2z']),
float(p['f_ref']),
pnutils.megaparsecs_to_meters(float(p['distance'])),
float(p['inclination']),
lal_pars,
_lalsim_enum[p['approximant']],
p['sample_points'].lal())
return Array(hp.data.data), Array(hc.data.data)
get_fd_waveform_sequence.__doc__ = get_fd_waveform_sequence.__doc__.format(
params=parameters.fd_waveform_sequence_params.docstr(prefix=" ",
include_label=False).lstrip(' '))
def get_td_waveform(template=None, **kwargs):
"""Return the plus and cross polarizations of a time domain waveform.
Parameters
----------
template: object
        An object that has attached properties. This can be used to substitute
for keyword arguments. A common example would be a row in an xml table.
{params}
Returns
-------
hplus: TimeSeries
The plus polarization of the waveform.
hcross: TimeSeries
The cross polarization of the waveform.
"""
input_params = props(template, required_args=td_required_args, **kwargs)
wav_gen = td_wav[type(_scheme.mgr.state)]
if input_params['approximant'] not in wav_gen:
raise ValueError("Approximant %s not available" %
(input_params['approximant']))
return wav_gen[input_params['approximant']](**input_params)
get_td_waveform.__doc__ = get_td_waveform.__doc__.format(
params=parameters.td_waveform_params.docstr(prefix=" ",
include_label=False).lstrip(' '))
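# Example sketch: generate the plus and cross polarizations of a compact
# binary signal in the time domain. The approximant name and parameter values
# are arbitrary illustrative choices; masses are in solar masses, distance in
# Mpc and delta_t in seconds.
def _example_get_td_waveform():
    hp, hc = get_td_waveform(approximant='SEOBNRv4', mass1=10.0, mass2=10.0,
                             spin1z=0.0, spin2z=0.0, distance=100.0,
                             inclination=0.0, delta_t=1.0/4096, f_lower=30.0)
    return hp, hc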
def get_fd_waveform(template=None, **kwargs):
"""Return a frequency domain gravitational waveform.
Parameters
----------
template: object
An object that has attached properties. This can be used to substitute
for keyword arguments. A common example would be a row in an xml table.
{params}
Returns
-------
hplustilde: FrequencySeries
The plus phase of the waveform in frequency domain.
hcrosstilde: FrequencySeries
The cross phase of the waveform in frequency domain.
"""
input_params = props(template, required_args=fd_required_args, **kwargs)
wav_gen = fd_wav[type(_scheme.mgr.state)]
if input_params['approximant'] not in wav_gen:
raise ValueError("Approximant %s not available" %
(input_params['approximant']))
try:
ffunc = input_params.pop('f_final_func')
if ffunc != '':
# convert the frequency function to a value
input_params['f_final'] = pnutils.named_frequency_cutoffs[ffunc](
input_params)
# if the f_final is < f_lower, raise a NoWaveformError
if 'f_final' in input_params and \
(input_params['f_lower']+input_params['delta_f'] >=
input_params['f_final']):
raise NoWaveformError("cannot generate waveform: f_lower >= f_final")
except KeyError:
pass
return wav_gen[input_params['approximant']](**input_params)
get_fd_waveform.__doc__ = get_fd_waveform.__doc__.format(
params=parameters.fd_waveform_params.docstr(prefix=" ",
include_label=False).lstrip(' '))
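# Example sketch: generate a frequency domain waveform directly. The parameter
# values are arbitrary illustrative choices; delta_f sets the frequency
# resolution of the returned FrequencySeries objects.
def _example_get_fd_waveform():
    hptilde, hctilde = get_fd_waveform(approximant='IMRPhenomD',
                                       mass1=30.0, mass2=30.0,
                                       delta_f=1.0/64, f_lower=20.0)
    return hptilde, hctilde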
def get_fd_waveform_from_td(**params):
""" Return time domain version of fourier domain approximant.
This returns a frequency domain version of a fourier domain approximant,
with padding and tapering at the start of the waveform.
Parameters
----------
params: dict
        The parameters defining the waveform to generate.
See `get_td_waveform`.
Returns
-------
    hp: pycbc.types.FrequencySeries
        Plus polarization frequency series
    hc: pycbc.types.FrequencySeries
        Cross polarization frequency series
"""
# determine the duration to use
full_duration = duration = get_waveform_filter_length_in_time(**params)
nparams = params.copy()
while full_duration < duration * 1.5:
full_duration = get_waveform_filter_length_in_time(**nparams)
nparams['f_lower'] -= 1
    if 'f_ref' not in nparams:
nparams['f_ref'] = params['f_lower']
# We'll try to do the right thing and figure out what the frequency
# end is. Otherwise, we'll just assume 2048 Hz.
# (consider removing as we hopefully have better estimates for more
    # approximants)
try:
f_end = get_waveform_end_frequency(**params)
delta_t = (0.5 / pnutils.nearest_larger_binary_number(f_end))
except:
delta_t = 1.0 / 2048
nparams['delta_t'] = delta_t
hp, hc = get_td_waveform(**nparams)
# Resize to the right duration
tsamples = int(1.0 / params['delta_f'] / delta_t)
if tsamples < len(hp):
raise ValueError("The frequency spacing (df = {}) is too low to "
"generate the {} approximant from the time "
"domain".format(params['delta_f'], params['approximant']))
hp.resize(tsamples)
hc.resize(tsamples)
# apply the tapering, we will use a safety factor here to allow for
    # somewhat inaccurate duration difference estimation.
window = (full_duration - duration) * 0.8
hp = wfutils.td_taper(hp, hp.start_time, hp.start_time + window)
hc = wfutils.td_taper(hc, hc.start_time, hc.start_time + window)
# avoid wraparound
hp = hp.to_frequencyseries().cyclic_time_shift(hp.start_time)
hc = hc.to_frequencyseries().cyclic_time_shift(hc.start_time)
return hp, hc
def get_td_waveform_from_fd(rwrap=0.2, **params):
""" Return time domain version of fourier domain approximant.
This returns a time domain version of a fourier domain approximant, with
padding and tapering at the start of the waveform.
Parameters
----------
rwrap: float
Cyclic time shift parameter in seconds. A fudge factor to ensure
that the entire time series is contiguous in the array and not
wrapped around the end.
params: dict
        The parameters defining the waveform to generate.
See `get_fd_waveform`.
Returns
-------
hp: pycbc.types.TimeSeries
Plus polarization time series
hc: pycbc.types.TimeSeries
Cross polarization time series
"""
# determine the duration to use
full_duration = duration = get_waveform_filter_length_in_time(**params)
nparams = params.copy()
while full_duration < duration * 1.5:
full_duration = get_waveform_filter_length_in_time(**nparams)
nparams['f_lower'] -= 1
if 'f_ref' not in nparams:
nparams['f_ref'] = params['f_lower']
# factor to ensure the vectors are all large enough. We don't need to
# completely trust our duration estimator in this case, at a small
# increase in computational cost
fudge_duration = (max(0, full_duration) + .1 + rwrap) * 1.5
fsamples = int(fudge_duration / params['delta_t'])
N = pnutils.nearest_larger_binary_number(fsamples)
fudge_duration = N * params['delta_t']
nparams['delta_f'] = 1.0 / fudge_duration
hp, hc = get_fd_waveform(**nparams)
# Resize to the right sample rate
tsize = int(1.0 / params['delta_t'] / nparams['delta_f'])
fsize = tsize // 2 + 1
hp.resize(fsize)
hc.resize(fsize)
# avoid wraparound
hp = hp.cyclic_time_shift(-rwrap)
hc = hc.cyclic_time_shift(-rwrap)
hp = wfutils.fd_to_td(hp, left_window=(nparams['f_lower'],
params['f_lower']))
hc = wfutils.fd_to_td(hc, left_window=(nparams['f_lower'],
params['f_lower']))
return hp, hc
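# Example sketch: obtain a time domain version of a frequency domain
# approximant. The parameter values are arbitrary illustrative choices; the
# default rwrap of 0.2 s keeps the start of the signal away from the
# wrap-around point of the output array.
def _example_get_td_waveform_from_fd():
    hp, hc = get_td_waveform_from_fd(approximant='IMRPhenomD',
                                     mass1=30.0, mass2=30.0,
                                     f_lower=20.0, delta_t=1.0/4096)
    return hp, hc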
def get_interpolated_fd_waveform(dtype=numpy.complex64, return_hc=True,
**params):
""" Return a fourier domain waveform approximant, using interpolation
"""
def rulog2(val):
return 2.0 ** numpy.ceil(numpy.log2(float(val)))
orig_approx = params['approximant']
params['approximant'] = params['approximant'].replace('_INTERP', '')
df = params['delta_f']
if 'duration' not in params:
duration = get_waveform_filter_length_in_time(**params)
elif params['duration'] > 0:
duration = params['duration']
else:
err_msg = "Waveform duration must be greater than 0."
raise ValueError(err_msg)
#FIXME We should try to get this length directly somehow
# I think this number should be conservative
ringdown_padding = 0.5
df_min = 1.0 / rulog2(duration + ringdown_padding)
# FIXME: I don't understand this, but waveforms with df_min < 0.5 will chop
# off the inspiral when using ringdown_padding - 0.5.
# Also, if ringdown_padding is set to a very small
# value we can see cases where the ringdown is chopped.
if df_min > 0.5:
df_min = 0.5
params['delta_f'] = df_min
hp, hc = get_fd_waveform(**params)
hp = hp.astype(dtype)
if return_hc:
hc = hc.astype(dtype)
else:
hc = None
f_end = get_waveform_end_frequency(**params)
if f_end is None:
f_end = (len(hp) - 1) * hp.delta_f
if 'f_final' in params and params['f_final'] > 0:
f_end_params = params['f_final']
if f_end is not None:
f_end = min(f_end_params, f_end)
n_min = int(rulog2(f_end / df_min)) + 1
if n_min < len(hp):
hp = hp[:n_min]
if hc is not None:
hc = hc[:n_min]
offset = int(ringdown_padding * (len(hp)-1)*2 * hp.delta_f)
hp = interpolate_complex_frequency(hp, df, zeros_offset=offset, side='left')
if hc is not None:
hc = interpolate_complex_frequency(hc, df, zeros_offset=offset,
side='left')
params['approximant'] = orig_approx
return hp, hc
def get_sgburst_waveform(template=None, **kwargs):
"""Return the plus and cross polarizations of a time domain
sine-Gaussian burst waveform.
Parameters
----------
template: object
        An object that has attached properties. This can be used to substitute
for keyword arguments. A common example would be a row in an xml table.
approximant : string
A string that indicates the chosen approximant. See `td_approximants`
for available options.
q : float
The quality factor of a sine-Gaussian burst
frequency : float
The centre-frequency of a sine-Gaussian burst
delta_t : float
The time step used to generate the waveform
hrss : float
The strain rss
amplitude: float
The strain amplitude
Returns
-------
hplus: TimeSeries
The plus polarization of the waveform.
hcross: TimeSeries
The cross polarization of the waveform.
"""
input_params = props_sgburst(template,**kwargs)
for arg in sgburst_required_args:
if arg not in input_params:
raise ValueError("Please provide " + str(arg))
return _lalsim_sgburst_waveform(**input_params)
# Waveform filter routines ###################################################
# Organize Filter Generators
_inspiral_fd_filters = {}
_cuda_fd_filters = {}
_cuda_fd_filters['SPAtmplt'] = spa_tmplt
_inspiral_fd_filters['SPAtmplt'] = spa_tmplt
filter_wav = _scheme.ChooseBySchemeDict()
filter_wav.update( {_scheme.CPUScheme:_inspiral_fd_filters,
_scheme.CUDAScheme:_cuda_fd_filters,
} )
# Organize functions for function conditioning/precalculated values
_filter_norms = {}
_filter_ends = {}
_filter_preconditions = {}
_template_amplitude_norms = {}
_filter_time_lengths = {}
def seobnrv2_final_frequency(**kwds):
return pnutils.get_final_freq("SEOBNRv2", kwds['mass1'], kwds['mass2'],
kwds['spin1z'], kwds['spin2z'])
def get_imr_length(approx, **kwds):
"""Call through to pnutils to obtain IMR waveform durations
"""
m1 = float(kwds['mass1'])
m2 = float(kwds['mass2'])
s1z = float(kwds['spin1z'])
s2z = float(kwds['spin2z'])
f_low = float(kwds['f_lower'])
# 10% margin of error is incorporated in the pnutils function
return pnutils.get_imr_duration(m1, m2, s1z, s2z, f_low, approximant=approx)
def seobnrv2_length_in_time(**kwds):
"""Stub for holding the calculation of SEOBNRv2* waveform duration.
"""
return get_imr_length("SEOBNRv2", **kwds)
def seobnrv4_length_in_time(**kwds):
"""Stub for holding the calculation of SEOBNRv4* waveform duration.
"""
return get_imr_length("SEOBNRv4", **kwds)
def imrphenomd_length_in_time(**kwds):
"""Stub for holding the calculation of IMRPhenomD waveform duration.
"""
return get_imr_length("IMRPhenomD", **kwds)
_filter_norms["SPAtmplt"] = spa_tmplt_norm
_filter_preconditions["SPAtmplt"] = spa_tmplt_precondition
_filter_ends["SPAtmplt"] = spa_tmplt_end
_filter_ends["TaylorF2"] = spa_tmplt_end
#_filter_ends["SEOBNRv1_ROM_EffectiveSpin"] = seobnrv2_final_frequency
#_filter_ends["SEOBNRv1_ROM_DoubleSpin"] = seobnrv2_final_frequency
#_filter_ends["SEOBNRv2_ROM_EffectiveSpin"] = seobnrv2_final_frequency
#_filter_ends["SEOBNRv2_ROM_DoubleSpin"] = seobnrv2_final_frequency
#_filter_ends["SEOBNRv2_ROM_DoubleSpin_HI"] = seobnrv2_final_frequency
# PhenomD returns higher frequencies than this, so commenting this out for now
#_filter_ends["IMRPhenomC"] = seobnrv2_final_frequency
#_filter_ends["IMRPhenomD"] = seobnrv2_final_frequency
_template_amplitude_norms["SPAtmplt"] = spa_amplitude_factor
_filter_time_lengths["SPAtmplt"] = spa_length_in_time
_filter_time_lengths["TaylorF2"] = spa_length_in_time
_filter_time_lengths["SEOBNRv1_ROM_EffectiveSpin"] = seobnrv2_length_in_time
_filter_time_lengths["SEOBNRv1_ROM_DoubleSpin"] = seobnrv2_length_in_time
_filter_time_lengths["SEOBNRv2_ROM_EffectiveSpin"] = seobnrv2_length_in_time
_filter_time_lengths["SEOBNRv2_ROM_DoubleSpin"] = seobnrv2_length_in_time
_filter_time_lengths["EOBNRv2_ROM"] = seobnrv2_length_in_time
_filter_time_lengths["EOBNRv2HM_ROM"] = seobnrv2_length_in_time
_filter_time_lengths["SEOBNRv2_ROM_DoubleSpin_HI"] = seobnrv2_length_in_time
_filter_time_lengths["SEOBNRv4_ROM"] = seobnrv4_length_in_time
_filter_time_lengths["SEOBNRv4"] = seobnrv4_length_in_time
_filter_time_lengths["IMRPhenomC"] = imrphenomd_length_in_time
_filter_time_lengths["IMRPhenomD"] = imrphenomd_length_in_time
_filter_time_lengths["IMRPhenomPv2"] = imrphenomd_length_in_time
_filter_time_lengths["IMRPhenomD_NRTidal"] = imrphenomd_length_in_time
_filter_time_lengths["IMRPhenomPv2_NRTidal"] = imrphenomd_length_in_time
_filter_time_lengths["SpinTaylorF2"] = spa_length_in_time
_filter_time_lengths["TaylorF2NL"] = spa_length_in_time
# Also add generators for switching between approximants
apx_name = "SpinTaylorF2_SWAPPER"
cpu_fd[apx_name] = _spintaylor_aligned_prec_swapper
_filter_time_lengths[apx_name] = _filter_time_lengths["SpinTaylorF2"]
from . nltides import nonlinear_tidal_spa
cpu_fd["TaylorF2NL"] = nonlinear_tidal_spa
for apx in copy.copy(_filter_time_lengths):
fd_apx = list(cpu_fd.keys())
td_apx = list(cpu_td.keys())
if (apx in td_apx) and (apx not in fd_apx):
# We can make a fd version of td approximants
cpu_fd[apx] = get_fd_waveform_from_td
if apx in fd_apx:
# We can do interpolation for waveforms that have a time length
apx_int = apx + '_INTERP'
cpu_fd[apx_int] = get_interpolated_fd_waveform
_filter_time_lengths[apx_int] = _filter_time_lengths[apx]
# We can also make a td version of this
# This will override any existing approximants with the same name
# (ex. IMRPhenomXX)
cpu_td[apx] = get_td_waveform_from_fd
td_wav = _scheme.ChooseBySchemeDict()
fd_wav = _scheme.ChooseBySchemeDict()
td_wav.update({_scheme.CPUScheme:cpu_td,_scheme.CUDAScheme:cuda_td})
fd_wav.update({_scheme.CPUScheme:cpu_fd,_scheme.CUDAScheme:cuda_fd})
sgburst_wav = {_scheme.CPUScheme:cpu_sgburst}
def get_waveform_filter(out, template=None, **kwargs):
"""Return a frequency domain waveform filter for the specified approximant
"""
n = len(out)
input_params = props(template, **kwargs)
if input_params['approximant'] in filter_approximants(_scheme.mgr.state):
wav_gen = filter_wav[type(_scheme.mgr.state)]
htilde = wav_gen[input_params['approximant']](out=out, **input_params)
htilde.resize(n)
htilde.chirp_length = get_waveform_filter_length_in_time(**input_params)
htilde.length_in_time = htilde.chirp_length
return htilde
if input_params['approximant'] in fd_approximants(_scheme.mgr.state):
wav_gen = fd_wav[type(_scheme.mgr.state)]
duration = get_waveform_filter_length_in_time(**input_params)
hp, _ = wav_gen[input_params['approximant']](duration=duration,
return_hc=False, **input_params)
hp.resize(n)
out[0:len(hp)] = hp[:]
hp = FrequencySeries(out, delta_f=hp.delta_f, copy=False)
hp.length_in_time = hp.chirp_length = duration
return hp
elif input_params['approximant'] in td_approximants(_scheme.mgr.state):
wav_gen = td_wav[type(_scheme.mgr.state)]
hp, _ = wav_gen[input_params['approximant']](**input_params)
# taper the time series hp if required
if 'taper' in input_params.keys() and \
input_params['taper'] is not None:
hp = wfutils.taper_timeseries(hp, input_params['taper'],
return_lal=False)
return td_waveform_to_fd_waveform(hp, out=out)
else:
raise ValueError("Approximant %s not available" %
(input_params['approximant']))
def td_waveform_to_fd_waveform(waveform, out=None, length=None,
buffer_length=100):
""" Convert a time domain into a frequency domain waveform by FFT.
As a waveform is assumed to "wrap" in the time domain one must be
careful to ensure the waveform goes to 0 at both "boundaries". To
    ensure this is done correctly the waveform must have the epoch set such that
the merger time is at t=0 and the length of the waveform should be
shorter than the desired length of the FrequencySeries (times 2 - 1)
so that zeroes can be suitably pre- and post-pended before FFTing.
If given, out is a memory array to be used as the output of the FFT.
If not given memory is allocated internally.
If present the length of the returned FrequencySeries is determined
from the length out. If out is not given the length can be provided
    explicitly, or it will be chosen as the nearest power of 2. If choosing
length explicitly the waveform length + buffer_length is used when
choosing the nearest binary number so that some zero padding is always
added.
"""
# Figure out lengths and set out if needed
if out is None:
if length is None:
N = pnutils.nearest_larger_binary_number(len(waveform) + \
buffer_length)
n = int(N//2) + 1
else:
n = length
N = (n-1)*2
out = zeros(n, dtype=complex_same_precision_as(waveform))
else:
n = len(out)
N = (n-1)*2
delta_f = 1. / (N * waveform.delta_t)
# total duration of the waveform
tmplt_length = len(waveform) * waveform.delta_t
if len(waveform) > N:
err_msg = "The time domain template is longer than the intended "
err_msg += "duration in the frequency domain. This situation is "
err_msg += "not supported in this function. Please shorten the "
err_msg += "waveform appropriately before calling this function or "
err_msg += "increase the allowed waveform length. "
err_msg += "Waveform length (in samples): {}".format(len(waveform))
err_msg += ". Intended length: {}.".format(N)
raise ValueError(err_msg)
# for IMR templates the zero of time is at max amplitude (merger)
# thus the start time is minus the duration of the template from
# lower frequency cutoff to merger, i.e. minus the 'chirp time'
tChirp = - float( waveform.start_time ) # conversion from LIGOTimeGPS
waveform.resize(N)
k_zero = int(waveform.start_time / waveform.delta_t)
waveform.roll(k_zero)
htilde = FrequencySeries(out, delta_f=delta_f, copy=False)
fft(waveform.astype(real_same_precision_as(htilde)), htilde)
htilde.length_in_time = tmplt_length
htilde.chirp_length = tChirp
return htilde
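# Example sketch: FFT a time domain waveform whose epoch places the merger at
# t=0 into a frequency series. The waveform parameters are arbitrary
# illustrative choices; with no explicit length the output size is derived
# from the nearest power of two above len(hp) + buffer_length.
def _example_td_waveform_to_fd_waveform():
    hp, _ = get_td_waveform(approximant='SEOBNRv4', mass1=10.0, mass2=10.0,
                            delta_t=1.0/4096, f_lower=30.0)
    htilde = td_waveform_to_fd_waveform(hp)
    # htilde.delta_f == 1.0 / ((len(htilde) - 1) * 2 * hp.delta_t)
    return htilde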
def get_two_pol_waveform_filter(outplus, outcross, template, **kwargs):
"""Return a frequency domain waveform filter for the specified approximant.
Unlike get_waveform_filter this function returns both h_plus and h_cross
components of the waveform, which are needed for searches where h_plus
and h_cross are not related by a simple phase shift.
"""
n = len(outplus)
# If we don't have an inclination column alpha3 might be used
if not hasattr(template, 'inclination') and 'inclination' not in kwargs:
if hasattr(template, 'alpha3'):
kwargs['inclination'] = template.alpha3
input_params = props(template, **kwargs)
if input_params['approximant'] in fd_approximants(_scheme.mgr.state):
wav_gen = fd_wav[type(_scheme.mgr.state)]
hp, hc = wav_gen[input_params['approximant']](**input_params)
hp.resize(n)
hc.resize(n)
outplus[0:len(hp)] = hp[:]
hp = FrequencySeries(outplus, delta_f=hp.delta_f, copy=False)
outcross[0:len(hc)] = hc[:]
hc = FrequencySeries(outcross, delta_f=hc.delta_f, copy=False)
hp.chirp_length = get_waveform_filter_length_in_time(**input_params)
hp.length_in_time = hp.chirp_length
hc.chirp_length = hp.chirp_length
hc.length_in_time = hp.length_in_time
return hp, hc
elif input_params['approximant'] in td_approximants(_scheme.mgr.state):
# N: number of time samples required
N = (n-1)*2
delta_f = 1.0 / (N * input_params['delta_t'])
wav_gen = td_wav[type(_scheme.mgr.state)]
hp, hc = wav_gen[input_params['approximant']](**input_params)
# taper the time series hp if required
if 'taper' in input_params.keys() and \
input_params['taper'] is not None:
hp = wfutils.taper_timeseries(hp, input_params['taper'],
return_lal=False)
hc = wfutils.taper_timeseries(hc, input_params['taper'],
return_lal=False)
# total duration of the waveform
tmplt_length = len(hp) * hp.delta_t
# for IMR templates the zero of time is at max amplitude (merger)
# thus the start time is minus the duration of the template from
# lower frequency cutoff to merger, i.e. minus the 'chirp time'
tChirp = - float( hp.start_time ) # conversion from LIGOTimeGPS
hp.resize(N)
hc.resize(N)
k_zero = int(hp.start_time / hp.delta_t)
hp.roll(k_zero)
hc.roll(k_zero)
hp_tilde = FrequencySeries(outplus, delta_f=delta_f, copy=False)
hc_tilde = FrequencySeries(outcross, delta_f=delta_f, copy=False)
fft(hp.astype(real_same_precision_as(hp_tilde)), hp_tilde)
fft(hc.astype(real_same_precision_as(hc_tilde)), hc_tilde)
hp_tilde.length_in_time = tmplt_length
hp_tilde.chirp_length = tChirp
hc_tilde.length_in_time = tmplt_length
hc_tilde.chirp_length = tChirp
return hp_tilde, hc_tilde
else:
raise ValueError("Approximant %s not available" %
(input_params['approximant']))
def waveform_norm_exists(approximant):
if approximant in _filter_norms:
return True
else:
return False
def get_template_amplitude_norm(template=None, **kwargs):
""" Return additional constant template normalization. This only affects
the effective distance calculation. Returns None for all templates with a
physically meaningful amplitude.
"""
input_params = props(template,**kwargs)
approximant = kwargs['approximant']
if approximant in _template_amplitude_norms:
return _template_amplitude_norms[approximant](**input_params)
else:
return None
def get_waveform_filter_precondition(approximant, length, delta_f):
"""Return the data preconditioning factor for this approximant.
"""
if approximant in _filter_preconditions:
return _filter_preconditions[approximant](length, delta_f)
else:
return None
def get_waveform_filter_norm(approximant, psd, length, delta_f, f_lower):
""" Return the normalization vector for the approximant
"""
if approximant in _filter_norms:
return _filter_norms[approximant](psd, length, delta_f, f_lower)
else:
return None
def get_waveform_end_frequency(template=None, **kwargs):
"""Return the stop frequency of a template
"""
input_params = props(template,**kwargs)
approximant = kwargs['approximant']
if approximant in _filter_ends:
return _filter_ends[approximant](**input_params)
else:
return None
def get_waveform_filter_length_in_time(approximant, template=None, **kwargs):
"""For filter templates, return the length in time of the template.
"""
kwargs = props(template, **kwargs)
if approximant in _filter_time_lengths:
return _filter_time_lengths[approximant](**kwargs)
else:
return None
__all__ = ["get_td_waveform", "get_fd_waveform", "get_fd_waveform_sequence",
"get_fd_waveform_from_td",
"print_td_approximants", "print_fd_approximants",
"td_approximants", "fd_approximants",
"get_waveform_filter", "filter_approximants",
"get_waveform_filter_norm", "get_waveform_end_frequency",
"waveform_norm_exists", "get_template_amplitude_norm",
"get_waveform_filter_length_in_time", "get_sgburst_waveform",
"print_sgburst_approximants", "sgburst_approximants",
"td_waveform_to_fd_waveform", "get_two_pol_waveform_filter",
"NoWaveformError", "get_td_waveform_from_fd"]
| pannarale/pycbc | pycbc/waveform/waveform.py | Python | gpl-3.0 | 43,562 | ["Gaussian"] | 16e36a92a946b4c5b436a12b06b40008ebd088122d1a11c47e6e7c7cf297f8bb |